diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml
index c3efd0a..b00fe88 100644
--- a/accumulo-handler/pom.xml
+++ b/accumulo-handler/pom.xml
@@ -112,6 +112,12 @@
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+      <version>${hadoop-20S.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-core</artifactId>
       <version>${hadoop-20S.version}</version>
       <optional>true</optional>
@@ -123,6 +129,12 @@
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-client</artifactId>
+      <version>${hadoop-23.version}</version>
+      <optional>true</optional>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <version>${hadoop-23.version}</version>
       <optional>true</optional>
diff --git a/beeline/pom.xml b/beeline/pom.xml
index 45fa02b..9a1c5dc 100644
--- a/beeline/pom.xml
+++ b/beeline/pom.xml
@@ -49,6 +49,11 @@
       <artifactId>hive-shims</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-jdbc</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>commons-cli</groupId>
@@ -88,12 +93,6 @@
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-jdbc</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hive</groupId>
       <artifactId>hive-exec</artifactId>
       <version>${project.version}</version>
       <classifier>tests</classifier>
diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
index bead16c..e3c2449 100644
--- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java
+++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java
@@ -692,10 +692,6 @@ int initArgs(String[] args) {
 
     int code = 0;
     if (!commands.isEmpty()) {
-      // for single command execute, disable color
-      getOpts().setColor(false);
-      getOpts().setHeaderInterval(-1);
-
       for (Iterator i = commands.iterator(); i.hasNext();) {
         String command = i.next().toString();
         debug(loc("executing-command", command));
diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java b/beeline/src/java/org/apache/hive/beeline/Commands.java
index a92d69f..7e366dc 100644
--- a/beeline/src/java/org/apache/hive/beeline/Commands.java
+++ b/beeline/src/java/org/apache/hive/beeline/Commands.java
@@ -38,6 +38,7 @@
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.sql.SQLWarning;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -47,10 +48,13 @@
 import java.util.TreeSet;
 
 import org.apache.hadoop.hive.common.cli.ShellCmdExecutor;
+import org.apache.hive.jdbc.HiveStatement;
 
 public class Commands {
   private final BeeLine beeLine;
+  private static final int DEFAULT_QUERY_PROGRESS_INTERVAL = 1000;
+  private static final int DEFAULT_QUERY_PROGRESS_THREAD_TIMEOUT = 10 * 1000;
 
   /**
    * @param beeLine
@@ -728,7 +732,7 @@ private boolean execute(String line, boolean call) {
       beeLine.handleException(e);
     }
 
-
+    line = line.trim();
     if (line.endsWith(";")) {
       line = line.substring(0, line.length() - 1);
     }
@@ -758,6 +762,7 @@ private boolean execute(String line, boolean call) {
     try {
       Statement stmnt = null;
       boolean hasResults;
+      Thread logThread = null;
 
       try {
         long start = System.currentTimeMillis();
@@ -767,7 +772,15 @@ private boolean execute(String line, boolean call) {
           hasResults = ((CallableStatement) stmnt).execute();
         } else {
           stmnt = beeLine.createStatement();
-          hasResults = stmnt.execute(sql);
+          if (beeLine.getOpts().isSilent()) {
+            hasResults = stmnt.execute(sql);
+          } else {
+            logThread = new Thread(createLogRunnable(stmnt));
+            logThread.setDaemon(true);
+            logThread.start();
+            hasResults = stmnt.execute(sql);
+            logThread.interrupt();
+          }
         }
 
         beeLine.showWarnings();
@@ -782,6 +795,11 @@ private boolean execute(String line, boolean call) {
               beeLine.info(beeLine.loc("rows-selected", count) + " "
                   + beeLine.locElapsedTime(end - start));
             } finally {
+              if (logThread != null) {
+                logThread.join(DEFAULT_QUERY_PROGRESS_THREAD_TIMEOUT);
+                showRemainingLogsIfAny(stmnt);
+                logThread = null;
+              }
               rs.close();
             }
           } while (BeeLine.getMoreResults(stmnt));
@@ -792,6 +810,13 @@ private boolean execute(String line, boolean call) {
               + " " + beeLine.locElapsedTime(end - start));
         }
       } finally {
+        if (logThread != null) {
+          if (!logThread.isInterrupted()) {
+            logThread.interrupt();
+          }
+          logThread.join(DEFAULT_QUERY_PROGRESS_THREAD_TIMEOUT);
+          showRemainingLogsIfAny(stmnt);
+        }
         if (stmnt != null) {
           stmnt.close();
         }
@@ -803,6 +828,61 @@ private boolean execute(String line, boolean call) {
     return true;
   }
 
+  private Runnable createLogRunnable(Statement statement) {
+    if (statement instanceof HiveStatement) {
+      final HiveStatement hiveStatement = (HiveStatement) statement;
+
+      Runnable runnable = new Runnable() {
+        @Override
+        public void run() {
+          while (hiveStatement.hasMoreLogs()) {
+            try {
+              // fetch the log periodically and output to beeline console
+              for (String log : hiveStatement.getQueryLog()) {
+                beeLine.info(log);
+              }
+              Thread.sleep(DEFAULT_QUERY_PROGRESS_INTERVAL);
+            } catch (SQLException e) {
+              beeLine.error(new SQLWarning(e));
+              return;
+            } catch (InterruptedException e) {
+              beeLine.debug("Log-fetching thread interrupted; the query is done.");
+              return;
+            }
+          }
+        }
+      };
+      return runnable;
+    } else {
+      beeLine.debug("The statement is not an instance of HiveStatement: " + statement.getClass());
+      return new Runnable() {
+        @Override
+        public void run() {
+          // do nothing.
+        }
+      };
+    }
+  }
+
+  private void showRemainingLogsIfAny(Statement statement) {
+    if (statement instanceof HiveStatement) {
+      HiveStatement hiveStatement = (HiveStatement) statement;
+      List<String> logs;
+      do {
+        try {
+          logs = hiveStatement.getQueryLog();
+        } catch (SQLException e) {
+          beeLine.error(new SQLWarning(e));
+          return;
+        }
+        for (String log : logs) {
+          beeLine.info(log);
+        }
+      } while (logs.size() > 0);
+    } else {
+      beeLine.debug("The statement is not an instance of HiveStatement: " + statement.getClass());
+    }
+  }
 
   public boolean quit(String line) {
     beeLine.setExit(true);
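The log plumbing above is driven by two methods the patch itself uses on org.apache.hive.jdbc.HiveStatement: hasMoreLogs() and getQueryLog(). For readers who want the same progress output in their own JDBC client, here is a minimal sketch; the connection URL, credentials, and query are placeholders, and error handling is trimmed to the essentials.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

import org.apache.hive.jdbc.HiveStatement;

public class QueryLogTail {
  public static void main(String[] args) throws SQLException {
    // Hypothetical connection details; substitute your HiveServer2 URL and user.
    Connection conn =
        DriverManager.getConnection("jdbc:hive2://localhost:10000/default", "user", "");
    final HiveStatement stmt = (HiveStatement) conn.createStatement();

    Thread logThread = new Thread(new Runnable() {
      @Override
      public void run() {
        while (stmt.hasMoreLogs()) {
          try {
            // Drain whatever log lines the server has buffered so far.
            for (String line : stmt.getQueryLog()) {
              System.err.println(line); // progress to stderr, rows to stdout
            }
            Thread.sleep(1000); // mirrors DEFAULT_QUERY_PROGRESS_INTERVAL above
          } catch (SQLException e) {
            return; // statement closed or log fetch failed; stop polling
          } catch (InterruptedException e) {
            return; // main thread interrupted us because the query finished
          }
        }
      }
    });
    logThread.setDaemon(true);
    logThread.start();

    ResultSet rs = stmt.executeQuery("SELECT count(*) FROM src");
    logThread.interrupt();
    while (rs.next()) {
      System.out.println(rs.getLong(1));
    }
    rs.close();
    stmt.close();
    conn.close();
  }
}
```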
diff --git a/bin/beeline.cmd b/bin/beeline.cmd
index 0438e56..8687444 100644
--- a/bin/beeline.cmd
+++ b/bin/beeline.cmd
@@ -43,7 +43,22 @@ if not exist %HADOOP_HOME%\libexec\hadoop-config.cmd (
 @rem supress the HADOOP_HOME warnings in 1.x.x
 set HADOOP_HOME_WARN_SUPPRESS=true
 call %HADOOP_HOME%\libexec\hadoop-config.cmd
-set CLASSPATH=%CLASSPATH%;%HIVE_HOME%\lib\*;
+
+@rem include only the beeline client jar and its dependencies
+pushd %HIVE_HOME%\lib
+for /f %%a IN ('dir /b hive-beeline-**.jar') do (
+  set CLASSPATH=%CLASSPATH%;%HIVE_HOME%\lib\%%a
+)
+for /f %%a IN ('dir /b super-csv-**.jar') do (
+  set CLASSPATH=%CLASSPATH%;%HIVE_HOME%\lib\%%a
+)
+for /f %%a IN ('dir /b jline-**.jar') do (
+  set CLASSPATH=%CLASSPATH%;%HIVE_HOME%\lib\%%a
+)
+for /f %%a IN ('dir /b hive-jdbc-**-standalone.jar') do (
+  set CLASSPATH=%CLASSPATH%;%HIVE_HOME%\lib\%%a
+)
+popd
 
 call %JAVA_HOME%\bin\java %JAVA_HEAP_MAX% %HADOOP_OPTS% -classpath %CLASSPATH% org.apache.hive.beeline.BeeLine %*
diff --git a/bin/ext/beeline.sh b/bin/ext/beeline.sh
index 6c0435d..ddd5906 100644
--- a/bin/ext/beeline.sh
+++ b/bin/ext/beeline.sh
@@ -19,11 +19,17 @@ export SERVICE_LIST="${SERVICE_LIST}${THISSERVICE} "
 
 beeline () {
   CLASS=org.apache.hive.beeline.BeeLine;
-  execHiveCmd $CLASS "$@"
+
+  # include only the beeline client jar and its dependencies
+  beelineJarPath=`ls ${HIVE_LIB}/hive-beeline-*.jar`
+  superCsvJarPath=`ls ${HIVE_LIB}/super-csv-*.jar`
+  jlineJarPath=`ls ${HIVE_LIB}/jline-*.jar`
+  jdbcStandaloneJarPath=`ls ${HIVE_LIB}/hive-jdbc-*-standalone.jar`
+  export HADOOP_CLASSPATH=${beelineJarPath}:${superCsvJarPath}:${jlineJarPath}:${jdbcStandaloneJarPath}
+
+  exec $HADOOP jar ${beelineJarPath} $CLASS $HIVE_OPTS "$@"
 }
 
 beeline_help () {
-  CLASS=org.apache.hive.beeline.BeeLine;
-  execHiveCmd $CLASS "--help"
+  beeline "--help"
 }
-
diff --git a/common/pom.xml b/common/pom.xml
index ad9f6c0..01c74ba 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -72,6 +72,12 @@
     <dependency>
+      <groupId>com.google.code.tempus-fugit</groupId>
+      <artifactId>tempus-fugit</artifactId>
+      <version>${tempus-fugit.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <version>${junit.version}</version>
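The tempus-fugit library pulled in here supplies the ConcurrentRule and RepeatingRule JUnit rules plus the @Concurrent and @Repeating annotations that the reworked type tests below rely on. A minimal, self-contained sketch of the pattern; the test class and assertion are invented for illustration:

```java
import com.google.code.tempusfugit.concurrency.ConcurrentRule;
import com.google.code.tempusfugit.concurrency.RepeatingRule;
import com.google.code.tempusfugit.concurrency.annotations.Concurrent;
import com.google.code.tempusfugit.concurrency.annotations.Repeating;
import org.junit.Rule;
import org.junit.Test;
import static org.junit.Assert.assertEquals;

public class ExampleConcurrencyTest {
  // The rules look for the annotations on each @Test method and drive it
  // with multiple threads and repetitions.
  @Rule public ConcurrentRule concurrentRule = new ConcurrentRule();
  @Rule public RepeatingRule repeatingRule = new RepeatingRule();

  @Test
  @Concurrent(count = 4)       // four threads run the test body at once
  @Repeating(repetition = 100) // the body is repeated 100 times
  public void formatIsStable() {
    // A thread-safety smoke test: the operation must give the same result
    // no matter how many threads execute it simultaneously.
    assertEquals("abc       ", String.format("%-10s", "abc"));
  }
}
```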
diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index fa71f0e..5e180a6 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -649,6 +649,9 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
     HIVEJOINCACHESIZE("hive.join.cache.size", 25000,
         "How many rows in the joining tables (except the streaming table) should be cached in memory."),
 
+    // CBO related
+    HIVE_CBO_ENABLED("hive.cbo.enable", false, "Flag to control enabling Cost Based Optimizations using Optiq framework."),
+
     // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row,
     // need to remove by hive .13. Also, do not change default (see SMB operator)
     HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""),
@@ -1051,7 +1054,7 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
         "That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.\n" +
         "The optimization will be automatically disabled if number of reducers would be less than specified value."),
 
-    HIVEOPTSORTDYNAMICPARTITION("hive.optimize.sort.dynamic.partition", true,
+    HIVEOPTSORTDYNAMICPARTITION("hive.optimize.sort.dynamic.partition", false,
         "When enabled dynamic partitioning column will be globally sorted.\n" +
         "This way we can keep only one record writer open for each partition value\n" +
         "in the reducer thereby reducing the memory pressure on reducers."),
@@ -1196,13 +1199,6 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
         "Average row size is computed from average column size of all columns in the row. In the absence\n" +
         "of column statistics and for variable length complex columns like map, the average number of\n" +
         "entries/values can be specified using this config."),
-    // to accurately compute statistics for GROUPBY map side parallelism needs to be known
-    HIVE_STATS_MAP_SIDE_PARALLELISM("hive.stats.map.parallelism", 1,
-        "Hive/Tez optimizer estimates the data size flowing through each of the operators.\n" +
-        "For GROUPBY operator, to accurately compute the data size map-side parallelism needs to\n" +
-        "be known. By default, this value is set to 1 since optimizer is not aware of the number of\n" +
-        "mappers during compile-time. This Hive config can be used to specify the number of mappers\n" +
-        "to be used for data size computation of GROUPBY operator."),
     // statistics annotation fetches stats for each partition, which can be expensive. turning
     // this off will result in basic sizes being fetched from namenode instead
     HIVE_STATS_FETCH_PARTITION_STATS("hive.stats.fetch.partition.stats", true,
@@ -1384,6 +1380,8 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
         "authorization manager class name to be used in the metastore for authorization.\n" +
         "The user defined authorization class should implement interface \n" +
         "org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider. "),
+    HIVE_METASTORE_AUTHORIZATION_AUTH_READS("hive.security.metastore.authorization.auth.reads", true,
+        "If this is true, the metastore authorizer authorizes read actions on databases and tables."),
     HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager",
         "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
         "authenticator manager class name to be used in the metastore for authentication. \n" +
@@ -1479,10 +1477,10 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
     HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false,
         "Where to insert into multilevel directories like\n" +
         "\"insert directory '/HIVEFT25686/chinna/' from table\""),
-    HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS("hive.warehouse.subdir.inherit.perms", false,
-        "Set this to true if the the table directories should inherit the\n" +
-        "permission of the warehouse or database directory instead of being created\n" +
-        "with the permissions derived from dfs umask"),
+    HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS("hive.warehouse.subdir.inherit.perms", true,
+        "Set this to false if the table directories should be created\n" +
+        "with the permissions derived from dfs umask instead of\n" +
+        "inheriting the permission of the warehouse or database directory."),
     HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true,
         "whether insert into external tables is allowed"),
@@ -1513,8 +1511,8 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
         "The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery."),
     // HiveServer2 global init file location
     HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION("hive.server2.global.init.file.location", "${env:HIVE_CONF_DIR}",
-        "The location of HS2 global init file (.hiverc).\n" +
-        "If the property is reset, the value must be a valid path where the init file is located."),
+        "Either the location of a HS2 global init file or a directory containing a .hiverc file. If the \n" +
+        "property is set, the value must be a valid path to an init file or directory where the init file is located."),
     HIVE_SERVER2_TRANSPORT_MODE("hive.server2.transport.mode", "binary", new StringSet("binary", "http"),
         "Transport mode of HiveServer2."),
     HIVE_SERVER2_THRIFT_BIND_HOST("hive.server2.thrift.bind.host", "",
@@ -1722,6 +1720,9 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
     HIVE_VECTORIZATION_REDUCE_ENABLED("hive.vectorized.execution.reduce.enabled", true,
         "This flag should be set to true to enable vectorized mode of the reduce-side of query execution.\n" +
         "The default value is true."),
+    HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED("hive.vectorized.execution.reduce.groupby.enabled", true,
+        "This flag should be set to true to enable vectorized mode of the reduce-side GROUP BY query execution.\n" +
+        "The default value is true."),
     HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL("hive.vectorized.groupby.checkinterval", 100000,
         "Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed."),
     HIVE_VECTORIZATION_GROUPBY_MAXENTRIES("hive.vectorized.groupby.maxentries", 1000000,
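For reference, a sketch of how client code can read the flags this patch defines or changes. HiveConf.getBoolVar and the ConfVars enum are existing Hive APIs; the enum constants are the ones from the hunks above, and the wrapper class is illustrative only.

```java
import org.apache.hadoop.hive.conf.HiveConf;

public class ConfCheck {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();

    // New in this patch: CBO is off by default.
    boolean cboEnabled = conf.getBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED);

    // Default flipped from false to true by this patch.
    boolean inheritPerms =
        conf.getBoolVar(HiveConf.ConfVars.HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS);

    // Default flipped from true to false by this patch.
    boolean sortDynPart =
        conf.getBoolVar(HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITION);

    System.out.printf("cbo=%b inheritPerms=%b sortDynPart=%b%n",
        cboEnabled, inheritPerms, sortDynPart);
  }
}
```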
diff --git a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveChar.java b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveChar.java
index 63f0394..f3b4729 100644
--- a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveChar.java
+++ b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveChar.java
@@ -18,10 +18,19 @@
 package org.apache.hadoop.hive.common.type;
 
-import junit.framework.TestCase;
+import com.google.code.tempusfugit.concurrency.annotations.*;
+import com.google.code.tempusfugit.concurrency.*;
+import org.junit.*;
+import static org.junit.Assert.*;
 
-public class TestHiveChar extends TestCase {
+public class TestHiveChar {
+  @Rule public ConcurrentRule concurrentRule = new ConcurrentRule();
+  @Rule public RepeatingRule repeatingRule = new RepeatingRule();
+
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testBasic() {
     HiveChar hc = new HiveChar("abc", 10);
     assertEquals("abc       ", hc.toString());
@@ -47,6 +56,9 @@ public void testBasic() {
     assertEquals(3, hc.getCharacterLength());
   }
 
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testStringLength() {
     HiveChar hc = new HiveChar();
@@ -60,6 +72,9 @@ public void testStringLength() {
     assertEquals("0123456789 ", hc.toString());
   }
 
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testComparison() {
     HiveChar hc1 = new HiveChar();
     HiveChar hc2 = new HiveChar();
diff --git a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java
index 46a73f2..959989a 100644
--- a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java
+++ b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java
@@ -20,12 +20,19 @@
 import java.math.BigDecimal;
 import java.math.BigInteger;
 
-import org.junit.Assert;
-import org.junit.Test;
+import com.google.code.tempusfugit.concurrency.annotations.*;
+import com.google.code.tempusfugit.concurrency.*;
+import org.junit.*;
+import static org.junit.Assert.*;
 
 public class TestHiveDecimal {
 
+  @Rule public ConcurrentRule concurrentRule = new ConcurrentRule();
+  @Rule public RepeatingRule repeatingRule = new RepeatingRule();
+
   @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testPrecisionScaleEnforcement() {
     String decStr = "1786135888657847525803324040144343378.09799306448796128931113691624";
     HiveDecimal dec = HiveDecimal.create(decStr);
@@ -82,6 +89,8 @@ public void testPrecisionScaleEnforcement() {
   }
 
   @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testMultiply() {
     HiveDecimal dec1 = HiveDecimal.create("0.00001786135888657847525803");
     HiveDecimal dec2 = HiveDecimal.create("3.0000123456789");
@@ -105,6 +114,8 @@ public void testMultiply() {
   }
 
   @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testPow() {
     HiveDecimal dec = HiveDecimal.create("3.00001415926");
     Assert.assertEquals(dec.pow(2), dec.multiply(dec));
@@ -118,6 +129,8 @@ public void testPow() {
   }
 
   @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testDivide() {
     HiveDecimal dec1 = HiveDecimal.create("3.14");
     HiveDecimal dec2 = HiveDecimal.create("3");
@@ -133,6 +146,8 @@ public void testDivide() {
   }
 
   @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testPlus() {
     HiveDecimal dec1 = HiveDecimal.create("99999999999999999999999999999999999");
     HiveDecimal dec2 = HiveDecimal.create("1");
@@ -145,6 +160,8 @@ public void testPlus() {
 
   @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testSubtract() {
     HiveDecimal dec1 = HiveDecimal.create("3.140");
     HiveDecimal dec2 = HiveDecimal.create("1.00");
@@ -152,6 +169,8 @@ public void testSubtract() {
   }
 
   @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testPosMod() {
     HiveDecimal hd1 = HiveDecimal.create("-100.91");
     HiveDecimal hd2 = HiveDecimal.create("9.8");
@@ -160,12 +179,16 @@ public void testPosMod() {
   }
 
   @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testHashCode() {
     Assert.assertEquals(HiveDecimal.create("9").hashCode(), HiveDecimal.create("9.00").hashCode());
     Assert.assertEquals(HiveDecimal.create("0").hashCode(), HiveDecimal.create("0.00").hashCode());
   }
 
   @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testException() {
     HiveDecimal dec = HiveDecimal.create("3.1415.926");
     Assert.assertNull(dec);
@@ -174,6 +197,8 @@ public void testException() {
   }
 
   @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testBinaryConversion() {
     testBinaryConversion("0.00");
     testBinaryConversion("-12.25");
diff --git a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveVarchar.java b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveVarchar.java
index f8da48d..309d042 100644
--- a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveVarchar.java
+++ b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveVarchar.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hive.common.type;
 
-import junit.framework.TestCase;
 import org.apache.hadoop.hive.common.type.HiveVarchar;
 import org.apache.hadoop.hive.common.LogUtils;
 
@@ -28,8 +27,15 @@
 import java.io.InputStreamReader;
 import java.util.Random;
 
+import com.google.code.tempusfugit.concurrency.annotations.*;
+import com.google.code.tempusfugit.concurrency.*;
+import org.junit.*;
+import static org.junit.Assert.*;
+
+public class TestHiveVarchar {
+  @Rule public ConcurrentRule concurrentRule = new ConcurrentRule();
+  @Rule public RepeatingRule repeatingRule = new RepeatingRule();
 
-public class TestHiveVarchar extends TestCase {
   public TestHiveVarchar() {
     super();
   }
@@ -65,6 +71,9 @@ public static int getRandomCodePoint(int excludeChar) {
     }
   }
 
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testStringLength() throws Exception {
     int strLen = 20;
     int[] lengths = { 15, 20, 25 };
@@ -124,6 +133,9 @@ public void testStringLength() throws Exception {
     assertEquals(5, vc1.getCharacterLength());
   }
 
+  @Test
+  @Concurrent(count=4)
+  @Repeating(repetition=100)
   public void testComparison() throws Exception {
     HiveVarchar hc1 = new HiveVarchar("abcd", 20);
     HiveVarchar hc2 = new HiveVarchar("abcd", 20);
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java
index c965ce6..7806466 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleAvg.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hive.contrib.udaf.example;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDAF;
 import org.apache.hadoop.hive.ql.exec.UDAFEvaluator;
 
@@ -32,6 +33,8 @@
  * more efficient.
  *
  */
+@Description(name = "example_avg",
+value = "_FUNC_(col) - Example UDAF to compute average")
 public final class UDAFExampleAvg extends UDAF {
 
   /**
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleGroupConcat.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleGroupConcat.java
index e2680ac..cdcec43 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleGroupConcat.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleGroupConcat.java
@@ -21,6 +21,7 @@
 import java.util.ArrayList;
 import java.util.Collections;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDAF;
 import org.apache.hadoop.hive.ql.exec.UDAFEvaluator;
 
@@ -35,6 +36,8 @@
  * implement built-in aggregation functions, which are harder to program but
  * more efficient.
  */
+@Description(name = "example_group_concat",
+value = "_FUNC_(col) - Example UDAF that concatenates all arguments from different rows into a single string")
 public class UDAFExampleGroupConcat extends UDAF {
 
   /**
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMaxN.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMaxN.java
index 7bc19d9..ccb36d0 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMaxN.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMaxN.java
@@ -19,11 +19,13 @@
 
 package org.apache.hadoop.hive.contrib.udaf.example;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDAF;
 
 /**
  * Returns the max N double values.
  */
+@Description(name = "example_max_n", value = "_FUNC_(expr) - Example UDAF that returns the max N double values")
 public class UDAFExampleMaxN extends UDAF {
 
   /**
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMinN.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMinN.java
index 56ba3b6..947167a 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMinN.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udaf/example/UDAFExampleMinN.java
@@ -19,11 +19,13 @@
 
 package org.apache.hadoop.hive.contrib.udaf.example;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDAF;
 
 /**
  * Returns the min N double values.
  */
+@Description(name = "example_min_n", value = "_FUNC_(expr) - Example UDAF that returns the min N double values")
 public class UDAFExampleMinN extends UDAF{
 
   /**
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleAdd.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleAdd.java
index 719c3e1..18b1df6 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleAdd.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleAdd.java
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.hive.contrib.udf.example;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 
 /**
  * UDFExampleAdd.
  *
  */
+@Description(name = "example_add", value = "_FUNC_(expr) - Example UDF that returns the sum")
 public class UDFExampleAdd extends UDF {
 
   public Integer evaluate(Integer... a) {
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleArraySum.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleArraySum.java
index 879e77e..9a1a382 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleArraySum.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleArraySum.java
@@ -19,12 +19,14 @@
 
 import java.util.List;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 
 /**
  * UDFExampleArraySum.
  *
  */
+@Description(name = "example_arraysum", value = "_FUNC_(expr) - Example UDF that returns the sum")
 public class UDFExampleArraySum extends UDF {
 
   public Double evaluate(List<Double> a) {
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleFormat.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleFormat.java
index a92ad70..bc54e3c 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleFormat.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleFormat.java
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.hive.contrib.udf.example;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 
 /**
  * UDFExampleFormat.
  *
  */
+@Description(name = "example_format", value = "_FUNC_(expr) - Example UDF that returns a formatted String")
 public class UDFExampleFormat extends UDF {
 
   public String evaluate(String format, Object... args) {
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleMapConcat.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleMapConcat.java
index a13c05a..6b7360f 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleMapConcat.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleMapConcat.java
@@ -21,12 +21,15 @@
 import java.util.Collections;
 import java.util.Map;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 
 /**
  * UDFExampleMapConcat.
  *
 */
+@Description(name = "example_mapconcat",
+value = "_FUNC_(expr) - Example UDF that returns the contents of a Map as a formatted String")
 public class UDFExampleMapConcat extends UDF {
 
   public String evaluate(Map<String, String> a) {
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleStructPrint.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleStructPrint.java
index 299b3f0..a4fc796 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleStructPrint.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleStructPrint.java
@@ -19,12 +19,15 @@
 
 import java.util.List;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDF;
 
 /**
  * UDFExampleStructPrint.
  *
 */
+@Description(name = "example_structprint",
+value = "_FUNC_(obj) - Example UDF that returns the contents of an object")
 public class UDFExampleStructPrint extends UDF {
 
   public String evaluate(Object a) {
diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/udtf/example/GenericUDTFCount2.java b/contrib/src/java/org/apache/hadoop/hive/contrib/udtf/example/GenericUDTFCount2.java
index 032322a..8094946 100644
--- a/contrib/src/java/org/apache/hadoop/hive/contrib/udtf/example/GenericUDTFCount2.java
+++ b/contrib/src/java/org/apache/hadoop/hive/contrib/udtf/example/GenericUDTFCount2.java
@@ -20,6 +20,7 @@
 
 import java.util.ArrayList;
 
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
@@ -34,6 +35,8 @@
  * to test outputting of rows on close with lateral view.
  *
 */
+@Description(name = "udtfCount2",
+value = "_FUNC_(col) - UDTF that outputs the number of rows seen, twice.")
 public class GenericUDTFCount2 extends GenericUDTF {
 
   private transient Integer count = Integer.valueOf(0);
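The @Description annotations added above are what SHOW FUNCTIONS and DESCRIBE FUNCTION surface to users. A minimal sketch of the pattern on a hypothetical UDF; the class, function name example_square, and extended text are invented for illustration:

```java
import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDF;

/**
 * Hypothetical UDF showing the annotation pattern used above.
 */
@Description(name = "example_square",
    value = "_FUNC_(x) - Example UDF that returns x squared",
    extended = "Example:\n  > SELECT _FUNC_(3) FROM src LIMIT 1;\n  9")
public class UDFExampleSquare extends UDF {
  public Integer evaluate(Integer x) {
    if (x == null) {
      return null; // Hive UDFs conventionally map null input to null output
    }
    return x * x;
  }
}
```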
diff --git a/contrib/src/test/results/clientpositive/udf_example_add.q.out b/contrib/src/test/results/clientpositive/udf_example_add.q.out
index 6325d00..7916679 100644
--- a/contrib/src/test/results/clientpositive/udf_example_add.q.out
+++ b/contrib/src/test/results/clientpositive/udf_example_add.q.out
@@ -25,36 +25,24 @@ SELECT example_add(1, 2),
 FROM src LIMIT 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003 (type: double), 6.6 (type: double), 11.0 (type: double), 10.4 (type: double)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
-              Statistics: Num rows: 500 Data size: 22000 Basic stats: COMPLETE Column stats: COMPLETE
-              Limit
-                Number of rows: 1
-                Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: 1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: 3 (type: int), 6 (type: int), 10 (type: int), 3.3000000000000003 (type: double), 6.6 (type: double), 11.0 (type: double), 10.4 (type: double)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+            Statistics: Num rows: 500 Data size: 22000 Basic stats: COMPLETE Column stats: COMPLETE
+            Limit
+              Number of rows: 1
+              Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
+              ListSink
 
 PREHOOK: query: SELECT example_add(1, 2),
 example_add(1, 2, 3),
diff --git a/contrib/src/test/results/clientpositive/udf_example_format.q.out b/contrib/src/test/results/clientpositive/udf_example_format.q.out
index c589ebb..34b10c4 100644
--- a/contrib/src/test/results/clientpositive/udf_example_format.q.out
+++ b/contrib/src/test/results/clientpositive/udf_example_format.q.out
@@ -19,36 +19,24 @@ SELECT example_format("abc"),
 FROM src LIMIT 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: 'abc' (type: string), '1.1' (type: string), '1.1 1.200000e+00' (type: string), 'a 12 10' (type: string)
-              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 500 Data size: 182500 Basic stats: COMPLETE Column stats: COMPLETE
-              Limit
-                Number of rows: 1
-                Statistics: Num rows: 1 Data size: 365 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1 Data size: 365 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: 1
      Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+          Select Operator
+            expressions: 'abc' (type: string), '1.1' (type: string), '1.1 1.200000e+00' (type: string), 'a 12 10' (type: string)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 500 Data size: 182500 Basic stats: COMPLETE Column stats: COMPLETE
+            Limit
+              Number of rows: 1
+              Statistics: Num rows: 1 Data size: 365 Basic stats: COMPLETE Column stats: COMPLETE
+              ListSink
 
 PREHOOK: query: SELECT example_format("abc"),
 example_format("%1$s", 1.1),
diff --git a/data/files/avro_charvarchar.txt b/data/files/avro_charvarchar.txt
new file mode 100644
index 0000000..4e9a867
--- /dev/null
+++ b/data/files/avro_charvarchar.txt
@@ -0,0 +1,4 @@
+a |a |k1:v1|101,x200|10,abcdef
+ab|ab |k2:v123456|102,y200|10,abc
+abc|abc|k3:v1234|103,200|10,a
+abcdefghijklm|abcdefghijklmnop|k9:v12|109,200|10, abcdef
diff --git a/data/files/cbo_t1.txt b/data/files/cbo_t1.txt
new file mode 100644
index 0000000..e8034a4
--- /dev/null
+++ b/data/files/cbo_t1.txt
@@ -0,0 +1,20 @@
+1,1,1,1,true
+1,1,1,1,true
+1,1,1,1,true
+1,1,1,1,true
+1,1,1,1,true
+1,1,1,1,true
+1,1,1,1,true
+1,1,1,1,true
+1,1,1,1,true
+1,1,1,1,true
+ 1, 1,1,1,true
+ 1, 1,1,1,true
+ 1 , 1 ,1,1,true
+ 1 , 1 ,1,1,true
+1 ,1 ,1,1,true
+1 ,1 ,1,1,true
+1,1,1,1,false
+1,1,1,1,false
+null,null,null,null,null
+null,null,null,null,null
diff --git a/data/files/cbo_t2.txt b/data/files/cbo_t2.txt
new file mode 100644
index 0000000..34633d3
--- /dev/null
+++ b/data/files/cbo_t2.txt
@@ -0,0 +1,20 @@
+1,1,1,1,true
+2,2,2,2,true
+1,1,1,1,true
+2,2,2,2,true
+1,1,1,1,true
+2,2,2,2,true
+1,1,1,1,true
+2,2,2,2,true
+1,1,1,1,true
+2,2,2,2,true
+ 1, 1,1,1,true
+ 1, 1,1,1,true
+ 1 , 1 ,1,1,true
+ 1 , 1 ,1,1,true
+1 ,1 ,1,1,true
+1 ,1 ,1,1,true
+1,1,1,1,false
+1,1,1,1,false
+null,null,null,null,null
+null,null,null,null,null
diff --git a/data/files/cbo_t3.txt b/data/files/cbo_t3.txt
new file mode 100644
index 0000000..a9c995e
--- /dev/null
+++ b/data/files/cbo_t3.txt
@@ -0,0 +1,20 @@
+1,1,1,1,true
+2,2,2,2,true
+3,3,3,3,true
+1,1,1,1,true
+2,2,2,2,true
+3,3,3,3,true
+1,1,1,1,true
+2,2,2,2,true
+3,3,3,3,true
+1,1,1,1,true
+ 1, 1,1,1,true
+ 1, 1,1,1,true
+ 1 , 1 ,1,1,true
+ 1 , 1 ,1,1,true
+1 ,1 ,1,1,true
+1 ,1 ,1,1,true
+1,1,1,1,false
+1,1,1,1,false
+null,null,null,null,null
+null,null,null,null,null
diff --git a/data/files/cbo_t4.txt b/data/files/cbo_t4.txt
new file mode 100644
index 0000000..56e0794
--- /dev/null
+++ b/data/files/cbo_t4.txt
@@ -0,0 +1,20 @@
+1,1,1,1,true
+2,2,2,2,true
+3,3,3,3,true
+4,4,4,4,true
+1,1,1,1,true
+2,2,2,2,true
+3,3,3,3,true
+4,4,4,4,true
+1,1,1,1,true
+2,2,2,2,true
+ 1, 1,1,1,true
+ 1, 1,1,1,true
+ 1 , 1 ,1,1,true
+ 1 , 1 ,1,1,true
+1 ,1 ,1,1,true
+1 ,1 ,1,1,true
+1,1,1,1,false
+1,1,1,1,false
+null,null,null,null,null
+null,null,null,null,null
diff --git a/data/files/cbo_t5.txt b/data/files/cbo_t5.txt
new file mode 100644
index 0000000..245b1b9
--- /dev/null
+++ b/data/files/cbo_t5.txt
@@ -0,0 +1,20 @@
+1,1,1,1,true
+2,2,2,2,true
+3,3,3,3,true
+4,4,4,4,true
+5,5,5,5,true
+1,1,1,1,true
+2,2,2,2,true
+3,3,3,3,true
+4,4,4,4,true
+5,5,5,5,true
+ 1, 1,1,1,true
+ 1, 1,1,1,true
+ 1 , 1 ,1,1,true
+ 1 , 1 ,1,1,true
+1 ,1 ,1,1,true
+1 ,1 ,1,1,true
+1,1,1,1,false
+1,1,1,1,false
+null,null,null,null,null
+null,null,null,null,null
diff --git a/data/files/cbo_t6.txt b/data/files/cbo_t6.txt
new file mode 100644
index 0000000..dd72edd
--- /dev/null
+++ b/data/files/cbo_t6.txt
@@ -0,0 +1,20 @@
+1,1,1,1,true
+2,2,2,2,true
+3,3,3,3,true
+4,4,4,4,true
+5,5,5,5,true
+6,6,6,6,true
+1,1,1,1,true
+2,2,2,2,true
+3,3,3,3,true
+4,4,4,4,true
+ 1, 1,1,1,true
+ 1, 1,1,1,true
+ 1 , 1 ,1,1,true
+ 1 , 1 ,1,1,true
+1 ,1 ,1,1,true
+1 ,1 ,1,1,true
+1,1,1,1,false
+1,1,1,1,false
+null,null,null,null,null
+null,null,null,null,null
diff --git a/data/files/dynpart_test.txt b/data/files/dynpart_test.txt
new file mode 100644
index 0000000..ab6cd4a
--- /dev/null
+++ b/data/files/dynpart_test.txt
@@ -0,0 +1,24 @@
+24526172.99-11.32
+245261710022.633952.8
+24526172.1-2026.3
+2452617552.96-1363.84
+24526171765.07-4648.8
+2452617879.07-2185.76
+24526177412.832071.68
+245261785.825.61
+2452617565.92196.48
+24526175362.01-600.28
+24526173423.95-3164.07
+24526384133.98-775.72
+245263810171.1660.48
+2452638317.87-3775.38
+2452638156.67-4626.56
+24526381327.0857.97
+24526381971.35-488.25
+2452638181.03-207.24
+2452638267.01-3266.36
+24526380.15-241.22
+24526381524.33494.37
+2452638150.39-162.12
+24526381413.19178.08
+24526384329.49-4000.51
diff --git a/data/files/location.txt b/data/files/location.txt
new file mode 100644
index 0000000..b733f9f
--- /dev/null
+++ b/data/files/location.txt
@@ -0,0 +1,20 @@
+CAUSA100
+CAUSA100
+CAUSA100
+CAUSA100
+CAUSA100
+CAUSA100
+CAUSA100
+ILUSA100
+ILUSA100
+ILUSA100
+ILUSA100
+ILUSA100
+ILUSA100
+ILUSA100
+ILUSA100
+ILUSA100
+ILUSA100
+ILUSA100
+ILUSA100
+ILUSA100
diff --git a/data/files/parquet_types.txt b/data/files/parquet_types.txt
index 750626e..d342062 100644
--- a/data/files/parquet_types.txt
+++ b/data/files/parquet_types.txt
@@ -1,21 +1,21 @@
-100|1|1|1.0|0.0|abc|2011-01-01 01:01:01.111111111|a |a
-101|2|2|1.1|0.3|def|2012-02-02 02:02:02.222222222|ab |ab
-102|3|3|1.2|0.6|ghi|2013-03-03 03:03:03.333333333|abc|abc
-103|1|4|1.3|0.9|jkl|2014-04-04 04:04:04.444444444|abcd|abcd
-104|2|5|1.4|1.2|mno|2015-05-05 05:05:05.555555555|abcde|abcde
-105|3|1|1.0|1.5|pqr|2016-06-06 06:06:06.666666666|abcdef|abcdef
-106|1|2|1.1|1.8|stu|2017-07-07 07:07:07.777777777|abcdefg|abcdefg
-107|2|3|1.2|2.1|vwx|2018-08-08 08:08:08.888888888|bcdefg|abcdefgh
-108|3|4|1.3|2.4|yza|2019-09-09 09:09:09.999999999|cdefg|abcdefghijklmnop
-109|1|5|1.4|2.7|bcd|2020-10-10 10:10:10.101010101|klmno|abcdedef
-110|2|1|1.0|3.0|efg|2021-11-11 11:11:11.111111111|pqrst|abcdede
-111|3|2|1.1|3.3|hij|2022-12-12 12:12:12.121212121|nopqr|abcded
-112|1|3|1.2|3.6|klm|2023-01-02 13:13:13.131313131|opqrs|abcdd
-113|2|4|1.3|3.9|nop|2024-02-02 14:14:14.141414141|pqrst|abc
-114|3|5|1.4|4.2|qrs|2025-03-03 15:15:15.151515151|qrstu|b
-115|1|1|1.0|4.5|tuv|2026-04-04 16:16:16.161616161|rstuv|abcded
-116|2|2|1.1|4.8|wxy|2027-05-05 17:17:17.171717171|stuvw|abcded
-117|3|3|1.2|5.1|zab|2028-06-06 18:18:18.181818181|tuvwx|abcded
-118|1|4|1.3|5.4|cde|2029-07-07 19:19:19.191919191|uvwzy|abcdede
-119|2|5|1.4|5.7|fgh|2030-08-08 20:20:20.202020202|vwxyz|abcdede
-120|3|1|1.0|6.0|ijk|2031-09-09 21:21:21.212121212|wxyza|abcde
+100|1|1|1.0|0.0|abc|2011-01-01 01:01:01.111111111|a |a |k1:v1|101,200|10,abc
+101|2|2|1.1|0.3|def|2012-02-02 02:02:02.222222222|ab |ab |k2:v2|102,200|10,def
+102|3|3|1.2|0.6|ghi|2013-03-03 03:03:03.333333333|abc|abc|k3:v3|103,200|10,ghi
+103|1|4|1.3|0.9|jkl|2014-04-04 04:04:04.444444444|abcd|abcd|k4:v4|104,200|10,jkl
+104|2|5|1.4|1.2|mno|2015-05-05 05:05:05.555555555|abcde|abcde|k5:v5|105,200|10,mno
+105|3|1|1.0|1.5|pqr|2016-06-06 06:06:06.666666666|abcdef|abcdef|k6:v6|106,200|10,pqr
+106|1|2|1.1|1.8|stu|2017-07-07 07:07:07.777777777|abcdefg|abcdefg|k7:v7|107,200|10,stu
+107|2|3|1.2|2.1|vwx|2018-08-08 08:08:08.888888888|bcdefg|abcdefgh|k8:v8|108,200|10,vwx
+108|3|4|1.3|2.4|yza|2019-09-09 09:09:09.999999999|cdefg|abcdefghijklmnop|k9:v9|109,200|10,yza
+109|1|5|1.4|2.7|bcd|2020-10-10 10:10:10.101010101|klmno|abcdedef|k10:v10|110,200|10,bcd
+110|2|1|1.0|3.0|efg|2021-11-11 11:11:11.111111111|pqrst|abcdede|k11:v11|111,200|10,efg
+111|3|2|1.1|3.3|hij|2022-12-12 12:12:12.121212121|nopqr|abcded|k12:v12|112,200|10,hij
+112|1|3|1.2|3.6|klm|2023-01-02 13:13:13.131313131|opqrs|abcdd|k13:v13|113,200|10,klm
+113|2|4|1.3|3.9|nop|2024-02-02 14:14:14.141414141|pqrst|abc|k14:v14|114,200|10,nop
+114|3|5|1.4|4.2|qrs|2025-03-03 15:15:15.151515151|qrstu|b|k15:v15|115,200|10,qrs
+115|1|1|1.0|4.5|qrs|2026-04-04 16:16:16.161616161|rstuv|abcded|k16:v16|116,200|10,qrs
+116|2|2|1.1|4.8|wxy|2027-05-05 17:17:17.171717171|stuvw|abcded|k17:v17|117,200|10,wxy
+117|3|3|1.2|5.1|zab|2028-06-06 18:18:18.181818181|tuvwx|abcded|k18:v18|118,200|10,zab
+118|1|4|1.3|5.4|cde|2029-07-07 19:19:19.191919191|uvwzy|abcdede|k19:v19|119,200|10,cde
+119|2|5|1.4|5.7|fgh|2030-08-08 20:20:20.202020202|vwxyz|abcdede|k20:v20|120,200|10,fgh
+120|3|1|1.0|6.0|ijk|2031-09-09 21:21:21.212121212|wxyza|abcde|k21:v21|121,200|10,ijk
diff --git a/data/files/vectortab2k b/data/files/vectortab2k
new file mode 100644
index 0000000..36ea3b3
--- /dev/null
+++ b/data/files/vectortab2k
@@ -0,0 +1,2000 @@
+111|||2607|5765.39|3882592.64|3785719054585.463867|true|mathematics|nick xylophone|2065-08-13 19:03:52|2018-11-14 17:26:37.322428829|2086-05-13
+-87|21091|-1012329052|9182828596851990528|22311.90|2883584.50|3563455992720.427734|true|quiet hour|david quirinius|2047-08-30 02:59:28|2062-07-07 09:32:33.387727593|2083-11-21
+-58|-31395|-1730740504|302|11322.18|-2081693.61|-1639547871112.334473|true|industrial engineering|xavier laertes|2057-09-02 10:26:29|2074-04-07 03:59:19.110762829|
+-19|133|95356298|78|-15490.77|-642668.92||true|american history|tom ovid|2059-06-14 15:58:37|2021-07-06 11:56:50.11392413|
+95|27922|-2027812975|3764|-38980.40|-497550.81|4616197451686.900391|false|education|wendy davidson|2031-12-06 02:13:55|2037-04-01 15:48:02.707720905|2013-05-01
+23|-9171|-805288503|7432428551399669760|-35391.71|-1387292.76|-4384441919303.938477||nap time|xavier hernandez|2069-02-22 06:47:15|2068-04-02 03:24:11.192596144|2048-06-21
+7|725||3118|-12519.93|-1305193.84|3984781579512.529297|true|forestry|david davidson|2071-01-16 20:21:17|2049-05-02 13:53:42.230477366|2030-02-20
+52|8918|-772236518|7250237407877382144|32666.20|-3997512.40|-4361210532142.081055|true|religion||2068-10-07 03:23:30||2010-12-13
+54|-30304|-146961490|8100036735858401280|38097.34|300734.43|-101907680156.507812|false|wind surfing|alice ellison|2076-07-17 05:35:58|2017-10-19 13:37:33.31074556|1985-06-25
+36|-10317|-538812082|7792036342592348160|36810.38|1666710.20|1617263763890.299805|true|debate|xavier king||2049-11-23 23:25:42.906731900|2025-10-11
+-92|21849|868714547|-7496839341561954304|32711.55|-2738225.86|-946920531679.183105|false|biology|zach falkner|2064-05-11 04:54:01|2041-04-22 19:12:27.123637103|2020-04-05
+-127|7353|1008698636|1719|4983.60|-2636583.28|2290285251062.656250|false|american history|ethan polk|2029-05-28 08:57:08|2071-11-04 02:44:19.546016087|2048-06-29
+73|16195|737149747|8283099811330506752|5758.00|2121137.53|-3867208889795.392578|true|philosophy|sarah davidson|2061-05-23 23:39:02|2064-12-20 12:52:14.436538366|2053-04-28
+-46|-28501|810157660|2241|-7769.30|-3860829.99|4232305555838.919922|true|biology|wendy falkner|2054-06-13 14:12:16|2044-03-24 22:32:03.722424719|
+31|-9609|1541249928|7659279803863146496|-32124.85|1848079.75|-1733147315988.326172|true|philosophy|calvin miller|2058-02-09 03:37:29|2039-10-26 21:00:53.243928394|2030-11-17
+-35|||1075|21713.21|-4409416.25|-433003793989.392578|false|opthamology||2026-04-19 20:19:23|2060-01-21 11:16:28.358392262|1983-12-12
+48|7401|476704350|-7911421221625077760|-23364.57|393045.55|-3706576226699.773438|false|zync studies|sarah polk|2064-11-08 08:38:31|2047-09-13 00:05:07.657220269|2073-09-23
+-29|14773|-1974777102|1774|-41874.29|360339.96|4724948953312.201172|false|undecided|oscar ellison|2058-08-24 22:30:53|2023-10-22 21:22:34.254262972|2064-04-02
+-57|30921|-522450861|8895174927321243648|-7382.06|-4926003.80|4192159288015.525391|true|topology|david white|2027-03-09 17:30:45|2032-03-22 06:16:46.756458466|2077-08-03
+|26557|-407089271|661|-38119.53|-51958.20|-4230513318073.718750||quiet hour|wendy carson|2016-08-23 22:18:05|2079-03-05 14:50:42.389412815|2102-03-14
+-4||273256071|-9014145341570203648|-14145.11|-3116102.10|2704318642566.079102|false||ulysses white|2050-06-25 15:49:09|2013-10-28 19:25:59.70638641|2032-03-19
+47|23441|-978892011|-7049618574399692800|1466.49|-2506000.67|-1261885101463.384766|false|study skills|xavier underhill|2053-03-08 05:42:09|2059-11-24 17:10:42.836803274|2012-02-18
+55|-1928|174310705|-6935038507792801792|-4574.16|-2096084.32||false|topology|tom king|2068-11-10 06:35:37|2075-12-17 14:54:53.484250504|2010-11-13
+-43|2671|851975276|504|35338.95|2409466.90|-4397204039138.387207|false|industrial engineering|xavier xylophone|2044-09-15 08:07:20|2066-11-14 20:08:56.457818450|2097-09-25
+40|-21772|-1832606512|-8831091081349758976|8799.89|1118606.84|868268330921.963867|false|wind surfing|jessica van buren|2031-08-02 13:03:07|2066-10-02 06:56:23.275227665|2089-06-05
+-37|20704|-1506324615|923||-4894306.72|3900883250375.564453|false|education|alice ellison|2078-01-19 06:32:53|2034-07-13 13:55:48.524380282|
+||-419335927|3263|-9973.58|379816.01|-4708070356227.338867|true|religion|sarah van buren|2061-02-13 08:24:35|2074-03-23 07:11:45.666149042|1994-04-22
+114|-2828|-1144976744|289|-21745.11|1431965.10|-654012413621.529785|true|history|katie robinson|2075-12-28 00:49:24|2028-08-01 10:04:22.203514303|2047-09-09
+-103|-20934|1107757211|8509508263705477120||507403.73|-4227549209611.147949||zync studies|zach robinson|2062-09-02 06:50:21||2084-08-14
+-87|||345|-31059.05|-4822074.38|2868093144922.561523|true|education|zach miller|2055-05-09 16:32:47|2015-11-28 12:47:47.540035115|2049-05-06
+83|-5635|-42151403|281|5719.35|-1928099.64|3951352468527.070312|false|philosophy|calvin young|2016-07-13 04:11:42|2025-05-12 16:31:50.778958822|2036-08-04
+-51|-20411|929560791|3637|48956.95|-1601399.55|1334219192158.307617|true|values clariffication|david polk|2032-08-10 12:27:00|2059-05-03 02:46:31.520651892|2048-12-21
+-86|8488|-425196209|-9066993118333706240|28889.32|3917802.65||true|opthamology|sarah nixon|2050-01-29 20:33:45|2052-07-20 02:09:30.619526537|2029-03-12
+77|-11232|1426152053|7354813692542304256|2007.63|-4307343.30|4686486642663.113281|false|study skills|bob falkner|2018-03-02 05:13:42|2070-06-20 06:01:30.717133325|2042-09-29
+||546555204|2029|6253.69|2528579.30|-3261089774688.485840|false||victor thompson||2060-02-01 14:16:38.477867192|2081-09-16
+-44|-16218||-8807361476639629312|36511.51|286248.72|2844066824227.447266|false|religion|xavier van buren|2039-09-01 16:08:42|2065-11-28 01:54:34.188054521|2039-07-18
+-23|27169|821316302|1371|-21871.99|-3722153.69|-545562668559.717773|false|undecided|mike brown|2063-04-07 13:36:45|2069-02-11 21:42:06.394121082|2054-04-18
+-75|23220|-1421860505|-7221474017515347968|-11848.57|-4271448.15|-1871570979952.359375|false|mathematics|yuri nixon|2025-10-02 14:38:27|2014-08-18 11:46:05.790786201|2037-05-04
+23||-677778959|8871707618793996288|-37545.98|2442119.38|-555577054032.596680|false|kindergarten|calvin quirinius|2041-09-15 03:23:20|2017-07-16 08:23:28.728239344|2014-06-03
+14|-20192|563507584|7198687580227043328|36052.76|-1306614.08|-1346869656422.100586|true|chemistry|katie robinson|2063-06-16 08:30:34|2057-04-22 20:48:38.16105051|2080-07-26
+-71|30090|1050809633|1990|32100.40|1500446.78|3795668572538.915039|false|wind surfing|tom garcia|2053-07-13 17:41:24|2019-12-19 14:37:16.693138309|2018-01-30
+-96||-1565671389|8984935029383389184|28720.33|-3180309.64|4280974782610.193359|false|xylophone band|bob underhill|2078-08-06 00:23:13|2034-07-26 23:30:44.175867724|1989-03-16
+74|23177|1910930064|815|45521.78|-4243565.50|-2179991053030.186035|true|religion|jessica steinbeck|2027-08-11 19:58:53|2025-12-23 12:56:48.785116575|2076-03-16
+-6|21162|1941527322|2056|18542.60|96721.07|60247412756.062500|true|values clariffication|katie ellison|2027-03-11 08:49:50|2014-05-26 15:11:45.90866024|2099-05-09
+20|-24115|684561551|8290944180915871744|-32692.85|-793214.39|158268139075.410156|true|philosophy|oscar van buren|2071-11-06 04:15:33|2078-07-06 07:11:57.809667477|2002-07-08
+121|-18533|167432368|228|-49580.62|-1199504.10|-4410893311038.144531|true|wind surfing|priscilla hernandez|2049-03-14 00:55:26|2015-08-24 11:31:28.973482642|2038-10-27
+-93|-28769|-463071187||29111.14|3936159.86|855085840126.683594|false|yard duty|quinn young|2074-10-25 23:48:59|2068-09-06 20:05:31.931744370|2092-10-19
+-95|5736|1204325852|3460|35891.97|3800244.68|-1017339551408.831055|true|philosophy|calvin ovid|2014-10-27 17:15:46|2074-07-15 23:20:20.972096957|1983-04-05
+-25|-11760|-2069439395||-44985.09|132699.25|1886345168609.331055|true|quiet hour|nick steinbeck|2029-03-23 20:58:01|2031-01-08 05:13:27.828333067|1980-03-09
+9|-18796|654939016|-8345065519816695808|32691.31|-2340544.58||false|yard duty|katie steinbeck|2056-01-19 20:32:40|2068-12-31 18:53:27.897916012|2095-05-24
+-115|28393|2080249726|-7576194692683563008|4313.15|311917.45|-2770003968115.233398|false|nap time|ethan robinson|2030-10-10 13:39:56|2044-09-10 23:59:14.562909594|2055-12-02
+100|6743|1565313938|-8782213262837530624|-964.34|-4407852.66|-773857309922.618652|true|biology|oscar carson|2044-11-17 07:46:19|2053-01-01 14:45:15.764196054|
+80|-25780|1017953606|7220131672176058368|-35258.17|1908273.76|1088509311003.029297|true|values clariffication|gabriella brown|2075-10-25 05:21:06|2031-10-26 08:37:10.578092307|
+110|-11008|-2009569943|7452756603516190720|3399.76|1741339.95|-2575772562204.221680|false|education|alice laertes|2058-10-01 22:58:51|2081-02-24 17:48:22.155034035|2032-11-07
+-38|-15115|-916344293|7410872053689794560|-26858.87|-818719.64|2489707283313.293945|true|wind surfing|nick thompson|2014-06-23 11:31:25|2038-07-05 23:22:59.392777497|2054-03-11
+0|27116||-7357888618985873408|42003.58|1662438.31|-2435727088707.188965|false|linguistics|mike zipper|2033-07-29 19:41:57|2079-03-13 01:50:52.671710709|
+|2955|-1463884101|-7409317158045442048|-8186.07|-2572292.78|-2934889488954.759766|false|yard duty|katie ichabod|2015-01-24 13:02:54|2042-06-16 20:51:00.958638266|1994-06-02
+-30|-12628|868717604|3472|-38487.44|-4162489.36|-4161258810464.367676|true|industrial engineering||2023-12-05 22:32:04|2022-06-14 23:40:08.41754293|2094-06-06
+59|27852|-1830870295|2494|-24567.97|-929319.13|-3421625759809.039062|true||david quirinius|2074-01-14 13:16:59|2028-01-07 18:05:38.773104432|2002-07-10
+78|-7850|1677494300|-8992599250893979648|44585.81|-2982899.63|-597466124925.460938|true|forestry|victor van buren|2017-03-29 22:51:02|2068-08-17 17:51:55.371885346|2028-09-02
+-117|-21922|-799249885|8014986215157530624||2470347.06||true|chemistry|quinn ichabod|2029-05-06 15:39:17|2024-11-10 13:44:54.55064479|1994-03-27
+48|28558|-78240945|3159|-33993.69|889632.88|3293895628016.016602|false|opthamology|yuri underhill|2020-10-08 08:30:26|2019-11-20 03:42:51.136962161|1988-05-08
+-76|-3008|-1904737684|||4481669.11|-2023909790198.992188|false|joggying|ethan nixon|2026-11-16 23:20:05|2034-08-07 18:06:43.803487119|1985-04-16
+112|-6874|203688965|7436133434239229952|7968.70|4867019.14|-1079601478463.523438|true|quiet hour|yuri garcia|2022-06-09 22:16:54|2030-01-13 10:59:10.162327595|2025-08-01
+-23|-3520|1464695860||41789.11|-3616352.29|3476169263477.153320|false|forestry|alice hernandez|2045-06-14 13:53:38|2027-07-20 19:10:56.885557429|2072-09-25
+97|-14537|-1358159222|-8845239510002753536|30582.49|772376.99|-2651348117038.914062|false|forestry|quinn king|2016-07-21 22:10:27|2063-04-22 11:22:26.510916644|2044-01-22
+-88|198|323919214|8345435427356090368|35725.39|530232.57|4000300362554.191406|true|debate|sarah davidson|2052-02-29 16:23:06|2065-11-02 15:21:22.820623860|2016-12-30
+-19|-32480|2126491387|9094945190752903168|-24257.03|-4364624.36|1971729105497.779297|||oscar underhill|2044-04-04 07:26:25|2033-02-14 16:22:07.768207723|2105-12-20
+80|-22426|-235819331|-9078662294976061440|-13063.45|-639370.11|-2899905469957.140625|false|study skills|yuri xylophone|2054-03-25 06:38:11|2023-07-21 09:30:19.636575309|2043-03-22
+33|20884|-909127123|7689489436826804224|-20048.83|-4996960.35|2669260655452.099609|true|geology|rachel xylophone|2076-08-27 00:00:51|2031-07-31 16:48:51.285046883|2037-03-17
+-20|15688|-1967660827|7620183559667081216|-31608.43|-3778691.05|-253958738593.276367|false|linguistics||2065-07-05 19:36:50|2029-01-01 03:06:13.533276880|2105-01-24
+-1|-5593|2058640744|-9071565764086521856|-25936.66||-2624029717623.415039|true|forestry|wendy polk|2034-11-26 03:16:39|2073-01-17 06:48:04.722179257|1970-02-07
+-21|-23540|1678261510|2092|-44267.84|-3266915.53|136230451259.180664|false|yard duty|wendy davidson|2050-05-05 09:31:02|2070-04-03 08:45:42.349508679|1976-07-06
+-15|-24576|1022707418|1481|46316.04|825604.05|-4275490669201.076172|true|yard duty||2014-06-03 17:24:32|2014-11-29 21:31:04.183698999|2015-04-13
+-108|-18292|-934008333|8939431770838810624|-19049.65|-3634517.80|308587454891.693359|||ethan ovid|2066-11-25 08:03:39|2046-11-27 13:27:02.11530030|1992-12-03
+96|-11047|-2096425960|803|4967.48|-2777770.11|4037681204968.351562|false|linguistics|gabriella polk|2019-09-16 21:26:32|2047-10-24 22:32:11.31036942|1984-08-10
+71|||-8730803262481580032|16660.42|3752718.33|-311540696207.970703|true|history|ethan van buren|2043-08-10 22:21:54|2074-06-06 06:33:09.229039028|1976-08-29
+-52|-18385|-774406989|8201303040648052736|7836.59|-2532286.37|1913657229799.596680|true|quiet hour|oscar zipper|2039-08-18 17:59:50|2024-02-20 12:49:38.156348041|
+-11|1847|22308780|-7953426740065312768||-2185323.72|-4536408641834.341797|false|philosophy|mike ovid|2046-04-01 23:23:25|2030-11-15 02:16:54.219567283|1995-07-03
+-103|-3091|-392713245|-8099313480512716800|40508.97|-2375645.47|-3996919734918.407227|false|chemistry|tom carson|2038-02-20 05:19:48|2057-02-04 09:09:17.378101856|2076-01-14
+|3694|-1404921781|1972|-31368.73|3279707.14|-2805368749215.979492|true|yard duty|rachel miller|2066-10-28 12:26:53|2065-05-14 17:44:00.934322881|2055-10-15
+121|-30677|-1061222139|7933040277013962752|-46396.26|958738.91|501053268792.510742|true|topology|xavier garcia|2066-03-24 23:40:52|2047-12-05 05:59:03.709642655|2041-01-13
+18|-1660|-1955545912|-9189155542884474880|-7714.06|881042.86|2065189794939.748047|false|topology|irene ellison|2026-11-08 06:16:55|2025-05-17 11:03:47.185223340|2074-07-12
+-42|9028|1621606222|2434|4438.76|-2964984.97|-2918353603075.267578|true|topology|priscilla young|2033-05-06 00:00:14|2023-10-29 17:43:13.522556162|2028-03-26
+124||978044705|7888238729321496576|-24208.95||-4850732296584.077148|false|undecided|jessica white|2020-12-07 05:16:00|2072-01-10 09:27:24.503516138|2019-05-19
+56|5599|-665623523|-8240034910581153792|-2335.60|202813.82|3851526875785.361328|true|nap time|sarah johnson|2021-03-07 18:22:05|2049-10-01 06:32:49.357282514|2018-10-27
+-102|-13722|259524903||-16413.09|-625420.83|3535592833060.938477||yard duty|bob nixon|2066-09-13 11:21:42|2066-04-06 02:06:21.730154464|
+-56|11238|410340192|3567|29934.62|-2105876.12|2566162905785.595703|false|quiet hour|calvin brown|2018-12-30 00:00:33|2053-12-25 09:43:49.544264273|2095-02-28
+-111|-797|1412102605|7296164580491075584|31824.08|-886591.25|-2874808713941.199219|false|education|ulysses hernandez|2055-02-28 04:35:16|2032-02-26 21:07:51.12262410|2098-06-15
+-42|10479|860658464|268|32754.56|-2369331.68|654615772986.453125|true|forestry|fred brown|2024-01-02 15:21:18|2023-03-29 02:26:33.829299976|2062-03-23
+|30052|-1728171376|-8654433008222797824|35386.65|-1875563.10|-3891050509015.840820|true|forestry|rachel carson|2018-04-15 00:41:38|2016-03-28 02:03:24.513570827|2060-09-17
+-101|-32455|516843026|2229|-36732.79|-4985474.88||true|nap time|victor ichabod|2051-12-24 02:43:52|2069-12-02 02:14:56.589108032|2030-01-26
+-75|-15431|877053605|-8783777723063099392|-10499.63|678272.19|3041525159920.972656|false|history|holly falkner|2080-07-05 04:43:29|2034-10-26 21:04:12.771747509|2075-09-17
+4|8948|1211873318|2897|-17416.18|-4925512.63|-4514053790870.740234|true|opthamology|oscar ellison|2019-04-11 13:20:29|2026-07-07 02:42:24.833351047|2048-12-23
+-58|1393|1848935036|7989160253372817408|47928.52|-853795.31|-1657076687161.556152|true|forestry|mike van buren|2061-05-06 21:57:53|2058-06-06 16:07:39.838916621|2065-11-16
+-87|22557|873035819|-8028910243475038208||-2816366.10|-769631198270.062012|true|debate|calvin johnson|2054-12-07 01:08:19|2080-09-25 11:27:16.9717306|2080-12-04
+83|-10781|-480058682|1286|-17175.04||-2199239017358.216797|true|chemistry|jessica nixon|2040-07-07 10:30:33|2066-09-29 14:31:17.34936376|2096-02-14
+21|30632|2040926345|8677632093825916928|-45759.70|-2898003.58|-4156272991252.514160|false|geology|yuri underhill|2035-02-11 06:39:18|2017-02-20 17:17:37.184850313|2042-05-26
+-73|-6948|1773417290|8011181697250631680|11623.01|-958775.93|-1866254854407.820801|false|joggying|oscar brown|2039-02-19 15:50:12|2018-01-08 03:08:57.403292535|2100-01-30
+60|-19517|-884109192|-7966960765508280320|-10884.65|-3876912.53|-2066003780139.915527|true|yard duty|alice zipper||2062-02-01 03:32:34.383978859|2011-05-28
+-68|-14315|889733679|9185952983951343616|27500.67|3188754.82|4438755017449.623047|false|philosophy|priscilla zipper|2076-07-13 10:52:27|2016-05-03 07:39:40.516896330|2069-09-26
+-95|31125|1036391201|7319711402123149312|31964.96|-1757277.11|-1496962050139.103516|true|kindergarten|victor ovid|2032-12-06 07:37:49|2062-07-26 22:38:29.146178601|2032-04-09
+-94|-6088|1604076720|9119046173224370176|19740.52|-4678968.17|3557302643795.097656|true|american history|david xylophone|2073-01-23 03:17:41|2040-11-18 04:31:12.709589656|2004-09-28
+19|-9243|-1249011023|-8760655406971863040|-32655.55|-1649087.83|932399155849.453125|false|forestry|alice van buren|2018-07-11 06:15:11|2027-04-13 10:06:40.902651522|2019-12-25
+12|7343|1418228573|8190539859890601984|-20276.81|-4235586.20|-1196769263706.656250|true|values clariffication||2070-05-15 09:16:30|2078-03-22 13:36:17.940377243|2037-02-13
+-12|6813|-253084551|3974|40698.97|-1115432.48|-2448300846308.981934|false|wind surfing|wendy ichabod|2022-05-20 11:14:25|2053-10-25 16:58:08.223136198|2097-11-18
+-125|-22641|1597303154|2106|4874.58||-30490934435.612305|false|religion||2015-03-28 09:25:39|2013-03-04 23:58:15.71378943|2078-11-03
+39|8410|1640445482|2579|-14853.40|-3366426.92|1755470105685.397461|false|mathematics|jessica xylophone|||2081-02-20
+|-12755|-1353470095|7210160489915236352||-1899846.57|-653238714138.346680|true|undecided|priscilla xylophone|2022-03-08 18:50:03|2059-09-27 11:04:07.90814681|2011-06-29
+102|28247|794783516|-7907355742053883904|20683.15|2965661.82|-624867269213.389648|false|chemistry|mike van buren|2064-01-12 01:22:43|2055-03-27 10:02:55.174795513|2085-03-01
+114|19513|418182899|1846|-24886.47|-3030589.60|4183756295927.345703|true|education|luke xylophone|2017-10-15 17:18:43|2063-06-10 14:18:48.104797455|2044-04-24
+108|27636|215508794|1189|-25835.00|4453603.41|4405275049808.744141|true|nap time|ulysses van buren|2015-07-02 03:45:28|2013-12-31 08:25:18.118248757|1981-11-06
+23|-19623|-1039040287||-6512.80|-660440.82|-1642262021182.041504|false|geology|xavier quirinius|2080-12-09 03:55:10||1998-04-16
+-53|-4169|879290165|68|34884.11|-1267493.50|-3279094975952.290039|false|industrial engineering|mike young|2075-12-04 18:22:09|2054-10-01 19:12:15.766024626|1977-12-27
+-88|16796|1151752586|-6975459232300236800|-4784.15|-4864373.89|3221553706534.408203|true|topology|tom zipper|2064-04-09 08:01:25|2070-08-17 06:01:00.968011813|2027-04-10
+13|17531|1805308672|-7700203302632210432|-37763.56|3704107.83|3425256239457.610352|false|undecided|xavier thompson|2027-04-18 16:26:01|2024-07-18 19:36:10.425538948|2092-04-17
+-61|32019|1420099773|-8021859935185928192|38974.63|1673755.73|2102524829645.560547|true|chemistry|ulysses falkner|2029-02-04 06:51:54|2046-05-19 15:05:25.732127788|2062-03-10
+7|19302|538268118|7339426767877390336|20672.29|4555902.70||false|topology|fred laertes|2017-01-13 01:23:09|2016-05-16 23:25:47.63891547|1998-12-23
+-114|-28736|449788961|774|-18622.98|-1268938.21|2666440101920.493164|false||priscilla ichabod|2063-12-27 01:29:50|2035-03-18 21:03:10.648253783|2062-10-07
+101||-1914072976|2295|1339.82|4112381.85|-1984449836425.916504|false|study skills|alice ovid||2075-09-26 04:56:48.660923039|2050-07-10
+44|-18090|259204652|-8559252110266564608|-9038.73|-4718571.75|3963517992293.716797|true|forestry|victor white|2066-12-24 15:59:32|2038-03-27 02:23:06.834673196|2096-07-12
+-24|10161|-882028850|3079|-45314.95|1053922.25|1574414367849.214844|false|history|xavier ovid|2052-09-14 19:24:26|2034-04-25 18:26:08.505694709|2040-08-03
+89|-24801|-158233823|1346|42990.33|4421723.61|2978558145914.504883|true|geology||2028-06-15 20:46:41|2071-05-17 17:16:54.148951459|1989-11-05
+31|-31582|1219616145|-8007017894942638080|-44727.28|-4114788.64|-4378058802654.081055|true|american history|quinn quirinius|2060-09-20 08:49:06|2023-11-29 06:13:18.611946024|2019-12-11
+|-19159|1807877618|987|-5972.19|-119465.75|1452475353908.404297|false|quiet hour|holly ichabod|2062-09-02 18:18:39|2070-04-17 08:28:26.5503386|2075-09-26
+27|17023|1747664003|-9049720998034137088|22254.70|3306808.08|3177841668890.231445|false|biology|priscilla ellison|2015-07-10 05:27:46|2027-06-11 15:58:30.658173981|2024-08-23
+|-25344||-8158011642485825536|-48220.79|4820070.80|3955085450779.839844|true|chemistry|victor polk|2055-12-20 16:02:07|2024-09-03 18:32:19.912271231|2011-06-12
+-96|-6950|-1070951602|7697541332524376064|-23329.71|4539092.16|-1684983372492.716797|false|undecided||2069-02-08 07:45:14|2068-03-26 18:14:15.165533561|
+-74|4261|217823040|8773222500321361920|-11741.38|1094988.55|3709069978104.153320|true|topology|rachel xylophone|2044-11-15 10:42:51|2059-08-26 20:14:15.307180062|2022-10-12
+-72|-7178|1814570016|-7915999634274369536|30236.61|2437780.95|2666538537304.746094|false|biology|fred carson||2079-01-28 01:30:38.861840968|2077-05-11
+-64|-1725|1493555718|6974475559697768448|-16776.52|-4105739.18|-2442617490401.950195|true|education|katie thompson|2044-03-29 00:20:37|2015-04-18 09:51:54.990681181|2078-01-13
+41|-19270|1119976718|7052226236896256000|28023.88|2110273.07|-4751768490466.296875|false|biology|yuri laertes|2079-09-26 20:39:48|2068-11-17 19:45:06.599632615|1983-05-31
+-58|-6494|1222217404|650|37717.83|107717.64|3643292384671.887695|true|industrial engineering|luke steinbeck|2028-02-25 14:55:40|2074-07-02 03:20:40.417209896|1973-03-04
+-127|-4252||3079|39550.67|-3279977.09|18225457799.800781|false|zync studies|holly zipper|2045-08-14 20:11:34|2024-05-11 15:11:48.691514072|2074-05-09
+-26|10261|914062370|1039||2283246.90|-1345490060709.941895|false|religion|luke robinson|2039-04-22 14:44:00|2053-03-18 12:47:22.448344815|1979-11-27
+-46|7356|683320224|-7610137349734883328|23741.83|-4065786.49|4723266191273.687500|true|chemistry|nick carson|2022-07-31 17:56:09|2024-03-03 22:59:52.779904666|2022-08-11
+48|28309|-570632618|-7526793959592140800|-4768.36|-4329360.25|3073439981743.923828|false|joggying|victor laertes|2034-03-18 16:11:32||2086-05-21
+39|3228|-535056977|8897901899039473664|3865.37|-206033.60|3887729126314.027344||study skills|wendy laertes|2018-03-11 05:09:22|2034-09-09 08:05:25.434342520|1982-04-12
+-73|-1011|-1058166020|8793387410919038976|-9572.10|2650235.31|-3040241790788.478516|false|quiet hour|oscar robinson|2062-03-17 08:07:25|2042-08-08 14:24:00.510838590|2078-05-19
+-65|17073|1111985530|9112400579327483904|46710.33|130023.02|||industrial engineering|wendy falkner|2051-09-14 21:51:35||1994-06-09
+-127|29365|895945459|-8559008501282832384|19094.69||-4601795255939.329102|true|study skills|katie underhill|2062-08-08 00:34:44|2035-03-17 18:33:10.848870183|2087-04-30
+42||-997463353|1788|-7.25|-1624411.04|2061492866787.446289|false|debate|xavier white|2074-08-12 02:19:10|2024-07-12 04:37:59.670091691|2079-05-31
+-24|-13978|-491882534|8554899472487596032|-42196.85|2380170.34|995819157271.000977|true|debate|ethan garcia|2067-04-29 08:24:05|2033-02-23 23:45:43.224368846|2026-11-24
+45|-7865|829101712|-8070535484085895168|-8695.03|1393839.55|-2930125300660.009277|true|religion|calvin garcia|2069-10-10 07:46:10|2030-09-03 02:47:09.999512465|2094-08-04
+34|-32240||-7858505678035951616|21913.94|-4848416.07|-4417461834328.939453|true|yard duty|tom miller|2020-08-15 02:15:04|2049-01-07 01:23:16.815364884|2041-04-17
+82|-4550|-1497098905|1058|19349.22|-3207956.11|2622882069011.147461|false|education|gabriella nixon|2048-06-12 05:15:59|2070-06-08 15:10:15.103401622|2047-07-22
+90|12762|-983874694|-7598782894648565760|-6545.09|4223917.78|-2724893143053.712891|true|biology|wendy carson|2064-09-28 23:47:20|2057-05-10 17:22:29.44470118|2009-10-04
+-52|-15501|-1184620079|7947544013461512192|17444.50|-4019474.98|-4273574634879.970703|true|debate|wendy falkner|2033-04-24 23:46:39|2064-12-09 12:41:18.605098238|1999-04-04
+-121|-19282|787925706||-39608.35|-3490480.46|-1396998641989.632812|true|opthamology|xavier robinson|2050-05-29 18:50:17|2061-07-15 16:24:02.41944450|2064-10-08
+-104|-6736|272086526|8625937019655200768|-42455.39|-2155542.09|-3341536888638.039062|true|undecided|wendy xylophone|2052-10-28 04:50:15||2103-12-05
+-105|-4059|760466914|3701|12507.09|2661135.97|1085001013184.765625|true|history||2056-10-06 10:43:05|2024-10-19 16:47:43.958389682|1991-09-28
+90|-25683|745725681|-7692192232238678016|364.62|-695924.61|-918004138590.973145|false|yard duty|mike polk|2052-05-18 02:04:21||2015-02-25
+-90|9110|1398486099|1291|-18652.98|4330260.30|-2988564000800.586426|false|chemistry|sarah robinson|2030-12-19 06:51:08|2073-06-18 12:33:18.634781669|1999-02-06
+53|24715|476919973|3887|-33746.36|-1812815.81|2248167382355.781250||values clariffication|calvin garcia|2028-06-28 08:30:31|2039-03-21 22:46:13.628314022|1994-05-06 +-112|14286|287239980|1462|23269.50|-3421045.14|-4825619680711.810547|false|geology|katie johnson|2023-07-03 15:05:18|2018-11-09 18:46:47.759800359|1983-09-25 +-36|3213|-1528033060|-8485389240529354752|33155.26|3831347.85|-2604012470736.947266|false|study skills|ulysses ichabod|2039-07-17 08:06:17|2032-05-11 08:35:57.629571826|2033-04-05 +-122|-16872|1520375588|8660248367767076864|-46409.01|1435619.95|-301617019681.543945|true|nap time|david steinbeck|2028-09-21 14:32:03|2053-05-02 22:50:04.195045396|1977-04-22 +|32092|-951728053|-8115963579415650304|17565.73|-4855596.00|-1567705616965.078125|false|wind surfing|ethan ovid|2079-03-11 06:37:16|2080-10-21 08:32:40.577382791|2021-08-16 +108|-6933|-66112513|388|-23820.11|-2660889.99|4673408308800.226562|true|values clariffication|calvin brown|2013-07-21 19:03:27||2078-03-23 +79|-6142|1295073553|-7262049693594943488|34920.29|-2775037.93|-3850646756153.040039||chemistry|sarah king|2062-11-28 06:34:11|2064-12-02 14:20:25.689540381|1970-10-19 +114|17164|-217785690|1343|-22425.89|-4109302.98|1542043552284.902344|true|biology|mike miller|2043-01-06 00:29:33|2027-09-11 18:50:09.224829213|2084-05-01 +79|22366|1632769786|1508|-16193.28|-4581299.04|-4048240005182.783691|true|opthamology|sarah zipper|2027-06-15 04:28:56|2024-11-18 13:11:30.807086190|2094-07-09 +-65|5785|-648766606|1701|-25497.04|3706795.80|4406779957872.437500|true|study skills|tom polk||2047-01-20 19:02:44.296212906|2079-09-24 +-23|-29475|2052773366|8837420822750314496|-44389.81|-3244548.09|-3320189294654.056641|true|xylophone band|katie carson|2014-08-14 04:46:15|2032-01-06 02:52:13.841451923|2008-01-10 +25|24014|-1318045616|-8704234107608203264|-25094.94||1313758909596.565430|false|study skills|irene young|2034-08-06 10:56:24|2050-11-22 00:49:32.763404963| +67|-9614|1124269631|3680|29490.38|-1942538.00|-2214662106936.273926|false|topology|katie ovid||2028-06-06 03:25:20.277859556|2088-10-27 +23|-21708|-94709066|7271887863395459072|-43725.17|-582351.40|-1251943808107.505371|true|mathematics|oscar garcia||2019-08-07 09:24:28.969847804|2091-10-12 +-49|-12071|1384071499|8156018594610790400|44500.74|-423299.81||false|mathematics|zach garcia|2062-01-17 23:46:46|2035-06-22 17:50:21.193860499|2068-12-27 +-34|-27553|1860113703|8708845895460577280||4443055.22|-3152882360663.084961|false|geology|priscilla young|2032-12-02 17:45:28|2030-02-07 12:05:50.442818058|1981-07-26 +||1634441052|2847|-30620.44|-4164391.70|4767856866280.625000|true|kindergarten|rachel steinbeck||2015-02-23 17:38:53.735735894|2038-02-01 +-126|-31537|693331761|2509|-25833.15|-4801160.87|2804616317735.586914|false|kindergarten|priscilla thompson|2046-02-05 15:38:03|2035-04-25 16:30:39.959832372|1983-09-26 +121|-6219|65956045|1987|-13776.59||3855241018435.791016|false|joggying|nick laertes|2076-07-22 18:55:23|2045-05-01 12:49:24.875445|2104-07-12 +-8|30861||8470141334513098752|6021.11|3057994.15||true|undecided|gabriella thompson|2056-03-17 16:36:40|2076-09-17 19:51:03.971998194|2019-06-03 +108|-10687|977292235|2108|34147.75|3835815.78|-3242583557065.046387|true|xylophone band|luke van buren||2026-07-29 12:13:07.350517258|2004-10-09 +-103|5235|283322761|-7623405558242500608|-9121.56|4969099.99|1418203690420.333984|true|history|ethan ichabod|2052-01-27 14:18:09|2076-09-25 20:35:14.535725735|1995-08-27 
+-61|3063|-870624802|8687042963221159936|3943.90|-209028.71|-2183718197982.749023|false|joggying|calvin davidson|2034-06-08 16:02:13|2029-09-24 14:25:09.37764381|2105-01-30 +-96|-10516||3568|-19009.76|3239989.12|-4784778822088.213867|true|topology|oscar nixon||2036-02-03 04:58:16.203066567|2065-12-22 +-50|-30748|-1735287250|7746402369011277824|-4179.91|1292492.11|2324680845107.813477|false|mathematics|david allen|2016-06-22 21:51:17|2069-09-12 15:00:57.362462590|2072-09-24 +-103|9969|-1703620970|-8387536830476820480||-3049943.90|4709582975677.548828|false|zync studies|calvin allen||2079-11-27 18:43:43.659151983|2044-09-10 +34|-21818|818313200|3060|-38828.51|-2063369.12|-3583867768957.524902|false|debate|sarah johnson|2064-02-16 11:03:07|2019-10-12 04:27:17.822098134|2004-10-26 +-85|19350|1409872356|-7512289590991544320|-10184.90|3793945.90|-923624349228.369141|false|industrial engineering|katie white|2032-02-14 12:11:48|2061-04-03 12:08:01.5162482|2081-03-17 +-78|-28932|187893585|8235179243092090880|-23609.39|962981.03|2966147671023.726562|false|zync studies|luke ichabod|2017-01-05 14:49:04|2059-12-23 13:06:00.979846594|1993-11-14 +92|2728|398960205|8540237852367446016|-8750.79|-4703249.19|-501603049952.486328|false|nap time|tom king|2030-04-28 01:59:00|2018-08-22 06:45:33.15399240|1977-02-18 +8|6144|1437057145|-7265998318110711808|-21666.49|361449.07|-3148061620108.357422|true|quiet hour|luke johnson|2065-11-30 23:06:00|2052-07-18 07:53:41.806082172|2062-06-04 +-120|26859|-1554325042|583|15171.32|-2158930.81|-301298241024.774414|false|undecided|victor van buren|2050-07-22 13:12:51|2063-08-25 00:41:53.424629732|1979-04-17 +-124|-10095|1033609549|-8318886086186213376||3677625.78|4672389589988.128906|false|history|irene johnson|2056-03-14 14:13:51|2077-11-14 16:16:46.603310412|2084-07-26 +75|-23638||2410||-968637.64|1698116680328.283203|true|debate|rachel allen|2042-10-06 19:18:58|2057-08-09 04:27:05.138333574|1993-11-01 +-59|1361|-360113158|3021|10379.72|-2330239.84|2488834645794.498047|false|opthamology|bob king|2063-06-09 19:51:43|2037-08-18 09:27:00.913006136|2011-10-15 +|3523|661380540|-8018511948141748224|31005.68|-1939975.71|561437334048.342773|true|wind surfing|quinn thompson||1970-01-01 00:00:00.212340798|2086-01-31 +40|-32296|84231802|2886|-4154.25|4389868.65|378820126909.822266|false|american history|holly laertes||2065-08-04 06:59:17.824387481|2014-11-15 +-32|-6637|-1726479726|-7831320202242228224|39531.99|2377538.79|1099330213298.444336|false|debate|victor davidson|2050-04-05 23:38:10|2022-03-12 21:56:07.234951116|2018-10-08 +84|-18387|346562088|7998357471114969088|44620.73|4045062.26|4748679788424.253906|true|kindergarten|zach quirinius|2020-07-07 16:26:44|2047-03-31 09:15:12.887634795|2084-02-05 +-27|-10815|1769324649|7936149988210212864|11543.56|227674.23|-812606022147.335449|true|zync studies|alice johnson|2048-05-19 07:13:08|2062-03-12 06:53:03.331725965|2076-03-18 +81|2643|2140632003|-6970396058557005824|19854.94|2938603.02|562510896793.071289|true|education|david davidson|2068-04-12 18:18:24|2041-04-04 08:06:20.143031301|1972-02-24 +-24||780938234|-7083646746411720704|-16507.75|-3851733.33|1863907193854.252930|true|joggying|luke falkner|2049-02-04 12:28:05|2057-01-09 07:22:38.778950773|2100-08-23 +-101|-8857|-1918651448|-8675661101615489024|43481.62|149175.06|-2178760699903.925293|false|joggying|mike davidson|2058-06-04 16:19:32|2072-08-14 09:56:17.194554282|1976-03-18 +|-10821|770574055|-9126793997498957824|-36806.07|94051.69|3234847750887.297852|true|yard 
duty||2051-04-08 16:12:47|2075-11-21 21:55:19.18153321|2066-08-28 +42|9158|776606164|2506|-24639.80|-4869417.64|3079389364956.313477|true|industrial engineering|alice laertes|2044-10-22 22:17:52|2074-08-21 15:27:43.96892721|2058-05-29 +|7180|-51612681|7249443195032985600|-7568.74|1202707.37|-2119064600783.993164|false|undecided|calvin ichabod||2022-04-25 10:39:59.377128631|1982-07-06 +29|10891|1153811197|-8243487285852766208|11569.34|4212709.34|1034657320312.746094|false|american history|victor brown|2031-09-30 15:01:06|2025-09-04 14:13:00.976690553|1993-05-12 +-122|7654|-734921821|3941|7256.71|517028.01|-1114345601613.280273|false|topology|zach carson||2070-05-25 06:36:39.297148492|2001-05-07 +-50|-28968|-1460613213|8875745082589929472|-7442.32|-2702250.82||false|philosophy|priscilla brown|2032-06-20 09:31:24|2052-09-03 16:21:52.208215796|2087-08-18 +-112|-29907|206121314|-8705403811649355776|-44201.12|-897624.69|-2173857521374.103027|true|xylophone band|xavier brown|2059-05-21 10:37:20|2065-01-02 03:32:19.285960165|1998-03-18 +-6|17701|-1606567895|8410599906334097408|23194.74|4572852.32|-1064163809823.100098|true|xylophone band|holly nixon|2044-04-30 10:00:19||2011-12-07 +-117|-5869|1523657918|-8400045653258444800|-37316.59|-1140903.56|3668214733218.100586||study skills|fred brown|2066-10-11 19:46:58|2057-04-21 18:12:07.438157094|2072-06-21 +-58|-2118|-37876543|-8859107121649893376|-2682.16|-2318215.28|4457864918964.134766|false|zync studies|bob van buren|2065-12-18 19:50:19|2043-05-16 22:15:58.560013699|2000-07-15 +19|-26060|1425362689|2201|35741.32|-635373.52|-1660120664738.187012|false||mike quirinius|2055-11-26 10:21:07|2043-11-02 23:23:13.175695849|1978-08-05 +-115|1755|596045726|-7000925438663041024|1169.13|1224590.95|4349386575138.148438|true|xylophone band|calvin quirinius|2014-03-11 02:26:36|2019-09-03 01:49:36.181933289|2088-02-28 +-95|-2617|1229172951|490|-18461.73|1170976.95|1480054571327.649414|false|mathematics|quinn hernandez|2021-05-20 06:00:47|2014-10-28 09:16:31.610844395|2080-12-12 +30|-28244|1978200605|1910|7799.30|-123266.43|-1103541868445.567871|false|undecided|quinn underhill|2019-08-08 22:46:59|2059-05-28 04:51:35.498984116|1981-10-31 +-83|-15497|1000106109|8900351886974279680|46362.81|-4358428.80|3426770165622.876953|false||ethan brown|2027-01-01 03:13:01|2063-08-29 02:30:48.169621762|1992-08-21 +-93|4032|879289168|7343171468838567936|29799.26|-3707213.21|-3700102015935.797852||education||2080-10-01 22:58:13|2042-10-22 03:29:04.44238849|2038-02-21 +7|-29895|-464804906|2131|-21839.05|4389713.57|-1713151304480.397949|true|quiet hour|luke ellison|2027-10-30 14:21:42||2066-10-25 +-106|-22071|-1247229632||-38427.52|-2639190.56|-755064468729.002930|true|religion|victor robinson|2044-02-05 12:48:23|2058-01-26 21:26:48.253653944|2010-09-07 +-72|-5907|-211669740|3208|-38197.98|781541.24|-2726366048183.592773|false|industrial engineering|alice allen|2036-11-15 10:48:44|2074-12-03 23:58:03.654571719|2076-11-09 +-8|10600|-1155174991|-7420448501073051648|-48598.14|-2840797.35|966723065681.023438||kindergarten|luke robinson|2075-10-15 20:32:00||1990-02-19 +-89|-8267|786565385|3418|40746.89|-3336502.60|-3655334890183.326172|false|history|quinn van buren|2073-12-08 10:59:27|2059-12-18 19:47:35.135760242|1992-09-21 +58|24847|1115197541|-7030489936116252672|-32967.44|-3216068.70|-4540163781577.817383|true|study skills|ethan quirinius|2057-07-19 17:20:26|2061-01-07 10:20:58.287466414|2058-04-02 +82|||1093|-41542.22|-2852753.19||false|mathematics|oscar ellison|2078-03-12 
21:36:59||2040-03-02 +7|17107|-1227085134|454|-45679.81|4280344.76|-4755957353103.070312|true|biology|victor robinson|2075-06-02 02:53:30|2040-12-08 01:16:10.194735899|2020-12-19 +-109|-25692|-714594143|2791|15638.71|4868354.39|-1920412941995.195312|true|yard duty|priscilla brown|2013-03-25 15:38:28|2039-10-25 16:44:04.385364994|2073-11-05 +98|19215|30036142|1252|5512.48|2861312.90|304220501137.706055|true|zync studies|wendy polk|2074-09-18 04:34:04|2042-10-10 04:18:49.518287517|1992-09-29 +-119|-20879|-1900894010|19|-14328.52|3163463.29|-2231521147965.978027||american history|xavier van buren|2051-08-02 17:01:13|2070-06-10 09:08:31.433429764|2049-12-19 +-80|11979|874824958|485|-49403.10|1836049.78|-3891815088473.258301|false|american history|ethan king|2023-03-21 16:09:44|2018-10-09 10:15:27.268042756| +-122|4568|1505665168|-7453525026342617088|-47688.82||-2370722163282.117188|false|mathematics||2043-02-15 19:11:53|2080-04-06 03:34:04.633286901|2079-05-24 +|2677|-2077771325|1604|4199.81|1851095.86|-3091236716436.114258|true|philosophy|fred brown|2051-05-22 03:56:26|2073-04-07 19:52:50.601742131|2023-01-10 +62|-14280|-2043805661|8365058996333953024|11341.10|4541245.28|4402724332930.460938|true|xylophone band|yuri robinson|2080-04-12 01:21:14|2031-04-06 19:14:59.55187421|2031-05-31 +62|13578|738356485||22454.83|-4779.61|-1857979799639.935059|false|nap time|ethan king|2046-10-08 18:53:18|2058-12-03 23:22:35.944838503|1971-07-29 +-48|-22938|340384179|-7877598807023386624|2517.88|-2373948.65|947585735001.510742|false|opthamology|david thompson|2075-07-24 01:40:11|2036-06-14 00:00:15.278560351|2003-09-18 +-108|-23194|-1198465530|3055|-36853.20|2315107.36|-1344917461892.407715|true||rachel quirinius|2041-04-15 03:29:16|2043-07-29 21:24:09.491072788| +-112|19350|-705887590|-9079801920509001728|42578.38|2583540.14|885849762897.948242|false|american history|ethan white|2055-05-21 11:56:02|2059-12-29 12:16:34.533626678|1984-03-14 +33|16430|1347876055|-8465978403747037184||2398238.05|3976258329685.542969|false|undecided|david quirinius|2040-12-15 14:20:04|2059-03-26 08:14:53.410162567|2013-04-21 +|-21574|62293025|2426|14978.09|3616509.09|3825560324453.007812|true|history|sarah falkner|2035-04-19 13:32:57|2061-05-10 03:23:57.557730657|2099-03-03 +5|-6114|198017473|135|-14085.31|2000803.23|-2919243579540.449219|true|american history|nick brown|2073-05-02 15:11:49|2047-05-06 13:18:00.344896111|1997-06-17 +35|28128|1286367391|-7488415863027367936|-26817.08|-797714.37|3875352447190.109375|true|study skills|jessica brown|2072-03-19 14:27:46|2018-12-18 23:20:30.63266134|2080-12-24 +78|-22358|376076075|-9136398397785948160|46846.67|-1700725.96|-899635084786.325684|false|industrial engineering|katie king|2017-10-06 08:33:37|2057-10-12 20:55:36.383445468|1972-02-28 +-34|3009|-1804244259|579||-2740315.70|362659766665.806641|true|zync studies|sarah thompson|2032-07-12 22:13:10|2035-11-06 17:59:27.77357393|1996-08-02 +-103|4081|2083836439|8144552446127972352|36570.50|3555782.96|3609640367332.538086|false|philosophy|sarah davidson|2031-08-26 09:58:56|2023-01-11 17:04:23.449037266|2041-06-07 +39|-16367|1776456512|6933451028794925056|-43403.36|-4299638.88|793504824242.568359|false|forestry|ulysses thompson|2028-12-06 04:06:25|2055-06-24 20:19:48.682407175|1971-01-13 +37|20128||1777|35965.37|-1496116.32|-3499447525525.525391|false|linguistics|calvin robinson|2067-08-21 11:00:04|2033-11-12 22:25:15.735331075|2019-10-13 
+45|20254|1033836308|-8927968289860370432|-47362.97|3785304.85|-3394951194042.761230|false|forestry|david thompson|2017-04-09 00:02:35|2054-01-29 09:25:44.815093672| +-99|26910|-571587579|7265141874315517952|-28011.29|-4797236.03|4313736848665.250000|false|wind surfing|katie underhill|2080-01-07 15:47:45|2057-04-03 15:14:56.240582613|2046-03-03 +-118|16437|727802564|4078|32210.62|913010.81|2387469547668.992188|false|undecided|fred ellison|2030-03-31 06:22:45|2061-03-01 05:58:51.835198480|1982-08-19 +98|-11929|596242714|443|-18850.34|-2514812.80|3841757426531.521484|false|undecided|tom miller|2056-04-06 22:21:49|2023-11-11 06:58:15.825259324|2070-01-08 +-1|-22217|-743680989|7391208370547269632|1450.95|3168984.07|422951017333.055664|false|topology|priscilla thompson|2047-05-23 07:19:55|2019-08-24 23:35:52.785182782|2092-08-23 +-23|32589|1575091509|7790728456522784768|43789.16|-3767707.87|449819577744.783203|true|forestry|quinn thompson|2049-08-29 23:03:28|2059-03-17 09:44:04.511510153|2082-09-14 +-48|-22447|669871113|8398862954249560064|45334.49|-168095.10|-95855026425.759766|true|quiet hour|ethan laertes|2026-06-26 02:42:50|2014-08-21 12:14:15.695317873|2075-09-12 +114|27046|397255100|-8836899523028312064|30215.93|-717006.21|-357328163291.050781|true||zach brown|2073-12-21 08:21:58|2061-05-01 00:50:27.746635942|2021-06-09 +9|-11679|-2076886223|-7805985795815342080|19575.50|120994.39|380373641521.103516|true|xylophone band|ethan carson|2076-07-30 01:09:00|2057-09-20 07:35:34.305010925|2065-08-02 +-5|14783|531459992|1439|19351.91|4803315.07|-574114492621.049805|true|topology|jessica thompson|2072-08-04 11:14:51|2025-01-30 01:47:54.411372161|2085-08-12 +-41|22870|177391521|7266437490436341760|12.17|4688982.65|-4069350543654.266602|false|religion|oscar falkner|2016-04-04 12:22:03|2056-09-18 10:09:51.803143104|2104-08-29 +-123|7959|311478497|-8852770376039219200|-44161.63|1078252.98|1685122154602.396484|true|values clariffication|irene hernandez|2022-06-21 13:36:54|2057-10-24 03:22:01.465719271|2102-04-28 +82|-17995|673904922|3119|31236.39|3313603.14|350230667228.643555|true|religion|calvin steinbeck|2035-06-03 08:40:07|2047-07-13 00:30:59.312757955|1979-08-05 +109|-28299|1042237722|2962|-49297.70|2544504.56|1300896197981.842773|false|kindergarten|gabriella falkner|2045-09-01 02:14:09|2049-12-12 02:33:51.210858024|2041-12-08 +|7483|-1974257754|9001907486943993856|-39292.53|2604166.52|2628816035521.843750|false|mathematics|jessica nixon||2024-04-14 05:51:51.799661240|1973-10-11 +37|29639|218917585|-7378096180613840896|2468.85|545061.80|4576650304788.998047|false|joggying|quinn ellison|2030-03-18 10:56:59|2064-04-10 13:26:19.857498352|1973-05-29 +54|13145||7091300332052062208|-17351.53|4747854.89|3817727970940.189453|true|topology|oscar thompson|2065-09-11 19:52:17|2038-02-26 14:34:48.662632111|2098-03-08 +79|-4491|-559270035|-7818454479651135488|32811.79|-2589530.66|3650613746347.026367|false|forestry|zach polk|2062-04-28 22:14:44|2053-08-03 04:16:15.344431898| +123|31427|-1131684944|-8431492599012163584|48223.45|-4797838.11|4766118630785.527344|false|philosophy|quinn white|2035-12-20 20:20:18|2055-03-06 04:00:57.170922096|2071-06-23 +-57|-3602|-373034494|-8521578237232529408|35310.58|523737.60|4138846175212.621094||industrial engineering|ethan polk||2070-05-20 09:18:23.921779415|2089-12-19 +-6|-16722|-490337498|1926|43246.64|-22165.47|-624395488497.526855|false|nap time|jessica falkner|2070-09-14 21:01:42|2024-10-17 22:24:01.153750276|2069-10-18 
+64|-3219|-1829691116|7490717730239250432|46586.47|-1915228.77|1287655745842.997070|true|wind surfing|fred ellison|2060-10-10 10:17:38|2080-01-01 16:19:34.640061653|2095-02-04 +-26|-20949||-9187662685618348032|43398.02|-49310.50|1863250559721.372070|true|study skills|katie carson|2048-04-09 07:40:02|2073-09-18 13:35:45.258178237|1984-12-02 +-50|13078|135341845|2016|47991.75||-3445060192851.059570|false|zync studies|rachel young|2024-12-31 09:01:02||1970-08-12 +-41|27758|1646811064|1153|-4313.26|-3049537.40|-69377778943.611328|true|topology|katie johnson|2041-02-02 14:22:59|2041-06-19 10:09:46.112258643|2041-07-24 +-70|26232|-1447263708|2277|41062.58|-2943975.09|-2631454750987.565430|false|linguistics|yuri miller|2051-01-04 12:34:39|2075-09-08 01:57:00.107926096|2070-02-10 +|-22858|1743696703|3300|-4380.97|-2069211.91|-4596153868922.030273|false|study skills|rachel van buren|2080-08-26 15:15:51|2030-04-01 16:16:55.346669591|2015-09-27 +-40|3354|1303632852|3244|-21821.23|3976574.38|-4697749109705.853516|false|zync studies|quinn robinson|2071-08-03 03:03:18|2033-11-19 04:14:03.372021127|2093-10-02 +-56|11120|1949494660|7107604675626008576|11034.18|3802098.40|-3918244512545.996094|false|wind surfing|calvin nixon|2022-03-17 12:57:17|2020-11-30 18:42:55.661360529|2038-10-05 +-57|-20329|-1857500489|1198|13491.58|-2693679.28|755404222558.269531|true|mathematics|oscar ellison||2033-01-29 05:36:21.574403565|2092-09-14 +|-6735|-2138343289|3401|-22767.90|2032418.93|2618674044147.155273|false|xylophone band|xavier davidson|2019-01-23 07:48:23|2018-12-23 15:42:16.611413141|2046-12-15 +-22|10761|-1226425562|7718825401976684544|8162.40|1047714.74|-2476610981011.973145|false|opthamology|mike robinson|2057-11-26 17:53:27|2069-08-23 05:01:36.124435431|2092-08-24 +-40|29502|-809805200|-8518258741831680000||1144686.92|-4590760977006.702148|true|mathematics||2036-10-09 18:06:29|2064-10-14 13:41:49.171097321|2081-04-05 +50|24081|-728541537||48961.12|-4012888.15|1070384818656.826172|true|topology|victor xylophone|2016-05-04 04:35:48|2038-11-09 14:41:33.447548472|2051-02-08 +102|-6073|1464703053|2004|-38318.30|3968580.02||false|yard duty|david white|2046-02-20 04:57:04|2060-04-05 20:06:25.19768150|2102-06-28 +-41|-11863|-116484575|7801697837312884736|38211.68|-480570.59|-1334633602495.500977|true|nap time|ulysses quirinius|2036-02-23 15:54:13|2068-06-07 20:02:06.356997102|2088-09-29 +-59|-28646|1797164732|3949|18310.99|4375064.78|2366334406338.168945|true|american history|holly thompson|2027-01-27 17:59:41|2045-11-11 14:23:14.608629099|1987-09-11 +123|16767|1198701102|7762823913046556672||1643132.07|2312892585063.259766|true|linguistics|sarah underhill|2017-01-27 09:35:20|2035-06-28 17:21:09.561662434|2086-02-05 +127|-14936|1772349172|-7884460946615984128|-20111.34|-2887093.65|-2318194071335.035645|false|history|victor van buren|2018-11-16 12:15:48|2065-12-31 08:22:47.150581738|2100-05-20 +18|2612|1373871781|-7255010240787030016|49301.50|3253928.69|-4736789216462.362305|true|biology|ethan ellison|2049-01-22 13:36:56|2015-03-06 18:11:17.822359715|2103-01-05 +109||1647411522|-7262384251828518912|3972.39|154588.69||false||mike ellison|2015-10-15 23:12:55|2063-04-26 00:01:09.31563180|2026-05-22 +107|-2027|-1856034030|9132009829414584320|36330.70|2284543.68|-3148453530441.202148|true|study skills|gabriella nixon|2031-01-12 14:14:17|2050-02-14 06:21:51.178052619|2016-01-05 +7|-12626||7573530789362262016||-1103225.79|-3059007558357.649414|true|industrial engineering|katie robinson|2032-10-19 18:30:33|2043-10-22 
16:20:40.509237844|1984-06-13 +49||-36038293|7157247449513484288|||3344159968330.622070|true|geology|holly allen|2072-05-19 04:19:02|2064-02-17 13:02:26.377162854|2082-10-30 +37|-26666|-890552359|3493|-41018.63|-3192687.31|610319388952.653320|false|kindergarten|sarah young|2066-06-27 21:59:53|2041-09-23 12:15:37.914672228|2101-03-11 +-43|-12811|1265528735|7491898395977523200|-17199.24|2696923.64|3366546613003.044922|true|undecided|ethan robinson|2029-10-16 04:28:37|2069-03-21 18:01:09.359881056|2039-11-24 +126|2683|-1462604138|-7276111129363046400|12788.08|-3553203.52|2029170799165.562500|true|linguistics||2061-01-10 10:34:14|2036-06-23 07:16:14.299460550|2072-11-12 +-109|23031|1517488324|275|30188.31|-3437436.90|72393656889.993164|false|forestry|luke quirinius|2073-09-17 10:02:35|2058-07-18 11:06:36.866728379|2072-05-24 +-119|-21412|962712814|2715|-42015.52|-3416767.70|2730111077795.705078|true|nap time|ulysses brown|2031-03-11 11:56:59|2035-09-11 18:57:20.545053849|2009-02-02 +-109|-32022|793047956|8150115791664340992|-13856.97|3208380.07|3846344885594.810547|false|education|mike laertes|2037-10-15 17:01:28|2070-05-09 00:38:50.111446261|2025-09-01 +-1|-20591|972835688|7844258063629852672|-30899.74|2941710.70|2520339817545.540039|true|joggying|victor quirinius|2053-05-10 11:05:39|2025-06-02 14:48:41.118807009| +-86|-28097|291866793|1095|-13029.79|2395802.44|4638765048183.728516|false|debate|nick hernandez|2064-08-23 06:30:52||2103-11-18 +|16439|1972940844|8759089349412847616||2775872.41|2078988628132.627930|true|philosophy|nick quirinius|2020-10-01 01:39:49|2046-10-08 04:45:36.155827749|2046-06-24 +8|-7230|-2065080832|950|3517.45|-355907.54|-3463587144624.789062|true|biology|priscilla falkner|2077-07-28 16:36:07|2034-01-22 01:11:00.675336432|2051-01-03 +50|-11769|-558456218|6960137166475911168|1455.89|-3580235.81|-2225358531562.626953|true|undecided|yuri xylophone|2028-03-16 13:31:23|2036-10-04 05:21:52.917455779|1983-01-13 +-92|1558||-7612455481940246528|20027.51|4940501.37|-2309132393323.310059|true|american history|alice king|2023-02-04 21:40:55|2072-10-28 16:02:05.785926481|1980-05-03 +-110|18346|-1603374745|7471208109437304832|2980.46|-4262671.69|-3375577964698.521484|true|philosophy|alice zipper|2061-06-20 11:36:22|2080-05-22 04:06:26.257944476|2080-04-10 +-28|-21648|1081920048|1501|-39377.99|3185244.69|1714008069386.992188|false|values clariffication|tom thompson|2063-04-15 02:39:09|2075-09-24 04:37:07.641525192|2084-05-31 +26|29245|-817093900|-7145585429014888448|29563.03|2338619.47|-47947487681.199219||geology|priscilla polk|2015-11-23 03:51:26|2048-05-27 06:59:27.15089547|2053-05-23 +90|-3222|1301426600|7794244032613703680|43143.95|4941656.20|3845904537462.523438|true|chemistry|irene hernandez|2054-10-20 19:50:57|2075-08-30 11:18:34.922622163|2047-02-03 +-35|27983|975932228|-7510418793070075904|-31952.67|-547797.70|4033293860789.123047|true|geology|victor carson|2042-12-09 03:28:14|2065-01-16 03:24:08.971263541|2010-06-30 +-14|-29239|230954385|7983789401706094592|28755.14|2806157.04|-3764755036302.638184|true||wendy robinson|2022-06-26 13:18:28|2033-05-18 03:11:50.871005511|2007-03-28 +-86|-15393|-1392487784|3990|25365.72|-3122775.81|-3136333430552.511719|true|study skills|irene carson|2075-08-29 11:01:05|2058-01-25 11:54:54.863071401|1985-03-10 +58|-31551|1443426396|3830|-44944.38||-1901078252690.346680|false|mathematics||2069-02-25 02:44:41|2026-03-16 18:07:16.176024280|2087-02-16 +-79|-12463|-37773326|-7152177800841502720|15979.17|136523.07|4328797369585.437500|false|zync 
studies|fred hernandez|2074-01-21 22:42:03|2063-04-17 17:43:27.97971264|1986-09-21 +-125|-29323|44009986|1856|36983.95|841608.25||false|history|rachel young|2052-06-19 09:14:41|2025-12-14 16:53:02.735673012|2076-10-09 +-62|-28146|-901778330|2712|-29272.48|495232.86|-4971306749304.673828|false|values clariffication|david young|2023-08-22 20:04:18|2018-02-08 01:53:45.121138509|2067-01-28 +26|-24538|1767359228|-8746702976270385152|-10262.64|3827137.43|738589831875.913086|false|wind surfing|zach brown|2013-07-02 04:00:34|2039-08-29 12:57:02.351355581|2047-07-31 +21|7128|-217930632|-7903158849011843072|777.32|-3589619.96|424579951542.725586|true|philosophy|rachel robinson|2046-01-04 01:02:07|2066-12-14 05:45:23.618651463|2006-08-23 +-87|19739|-1699049982|2244|4616.60|2733769.47|2986145815124.956055|true|linguistics|yuri carson|2043-05-07 16:06:28|2076-01-07 23:49:22.432671324|2089-08-31 +66|29834|1307148254|2735|42117.25|1129420.19|4620474372066.283203|false|kindergarten|jessica xylophone|2051-08-08 09:05:51|2048-06-06 23:50:37.973877723|2060-05-08 +77|21511||1168|47905.76|-2302110.36|-2332600454045.881348|false|debate|mike van buren||2046-10-07 14:52:17.37357221|2001-12-10 +-31|-9482|1330219997|-9080956291212132352|26375.90|-1329189.97|1655214180391.766602|false|chemistry|holly xylophone|2051-07-18 20:15:33|2058-08-16 06:28:29.740534617|1982-12-07 +-43|27454|219415594|945|-12127.36|4216836.73|3646917581760.433594|false|education|calvin steinbeck|2076-07-10 08:53:35|2018-10-06 03:52:24.596896729|1983-09-04 +55|-24267|1458051497|8417381121663746048|49672.46|2222816.47|-3645327331789.474121|true|yard duty|wendy garcia|2036-06-01 21:18:52|2018-11-11 07:00:13.274801916|1996-09-30 +-13|-32541|1563120121|3823|28411.76|1170745.89|2011473770962.695312|false|undecided|nick polk|2023-03-23 06:07:51|2076-10-16 02:23:25.407392909|1985-12-24 +103|18395|-2144138362|-7868306678534193152|21455.88|-4870777.55|-2770023875366.263184|false|american history|victor laertes|2047-01-10 23:37:37|2032-02-23 13:39:32.775684381|2052-11-07 +-23|17964|-1100641049|-8518060755719585792|42499.60||-3519839102751.412109|false|chemistry|luke miller|2015-03-11 11:14:33|2068-07-24 11:17:54.14270195|2080-02-08 +20|32096|1796950944|3703|6669.34|141075.74|-2930607649235.206055|true|wind surfing|ulysses garcia|2013-02-18 21:06:48|2015-01-14 11:10:53.143208292|2030-08-16 +-60|-4804|-1988508336|7027529814236192768|36634.88|292742.80|-4785713903272.927734|false|kindergarten|bob young|2028-02-08 13:16:09|2058-06-30 12:21:12.174769417|2103-08-30 +-31|-15633|1109664665|-9105701280936501248|-7200.23|-3290965.94|-2858696088737.069336|false|wind surfing|nick king|2071-07-06 14:12:56|2072-05-07 22:23:28.909242175| +50|28118|-432218419|7570474972934488064|-23669.50|4714268.20|3257421256704.163086|false|undecided|nick white|2016-04-08 07:34:48|2041-04-06 04:08:01.271192129|2075-07-29 +-64|23229||-8430370933326536704|34019.68|1961397.66|2639849725646.250977|true|religion|luke carson|2079-03-12 02:12:51|2077-01-08 09:54:43.763841247|2004-07-25 +-49||152891873|417|||4308469366197.300781|false|forestry|irene underhill|2079-06-08 05:52:56|2029-11-07 18:19:49.927077461|2033-09-20 +-47|-20729|1673218677|49|37640.59||-713961786513.870117|false|debate|xavier falkner|2020-04-17 16:54:30|2028-06-03 04:57:19.609929160|2082-08-25 +-16||-1733560816||24701.68|-4273424.19|-2584537633556.181152|true|biology|david davidson|2053-08-30 09:50:24|2051-05-28 08:21:28.136207105|1991-03-05 
+55|-24186|-1871209811|7068517339681259520|46423.34|2538160.45|-3739128624481.580078|false|yard duty|fred polk|2038-03-02 19:52:08|2066-12-04 13:20:32.335880942|1994-06-21 +-8|26625|-906986958|2187||2742196.26|-4507463460711.384766|true|history|wendy johnson|2064-06-20 11:56:49|2025-07-17 23:27:04.166297070|2052-06-20 +30|-12880|-40407627|2348|-6268.82|2266879.88|3002655520364.605469|true|topology|quinn underhill|2063-07-19 09:45:06|2045-02-16 17:39:10.146220452|2075-03-15 +-85|-15762|-1556127172|-7139677575412686848||-4483436.56|-1265174168415.015137|||fred nixon|2061-03-21 05:44:18|2014-10-08 04:02:23.556239597|2071-03-05 +-6|15862|1012696613|888|-22200.62|3639449.27|2198173643342.400391|false|biology|david hernandez|2051-03-03 20:56:40|2058-12-04 08:26:03.321045989|2027-01-07 +92|-23550|1238986437|921|49411.28|1421037.00|-4845225760189.179688|true|history|ulysses underhill|2036-05-02 17:20:37|2018-07-15 18:49:56.958391676|2092-11-15 +4|18984|-267130580|-8034414142083170304|34672.91|-4128287.64|766368935100.487305|true|education|fred steinbeck|2079-07-18 08:49:16|2045-03-01 15:48:34.880096673|2053-04-30 +54|29015|-1065248998|-7330203470474985472||-3027947.92|3399052323913.345703|true|biology|sarah ellison|2026-03-03 11:12:47|2057-08-18 20:13:54.240194383|2059-10-09 +-119|31140||789||-940868.71|-4363934228329.320312|true|quiet hour|zach falkner|2049-03-25 02:38:35|2041-11-22 05:46:48.4658675|2006-04-06 +-127|-31998|-71305062|-7158472098920390656|20591.34|3729248.23|-526374280413.826172|false|mathematics|ethan young|2066-06-15 04:16:02|2030-06-20 07:00:32.309007556|2006-02-26 +51|-1034|-103219371|-7881262505761710080|29627.76|712784.66||true|nap time|xavier zipper|2066-05-06 03:31:19|2030-08-29 00:39:40.299687444|2071-11-21 +-113|26241|-971203543|8451612303224520704|-29948.96|3327680.13|-18523066115.392578|false|nap time|quinn miller|2065-09-30 09:42:56|2037-03-26 02:34:03.815937235|2026-03-17 +104||-996953616|3541|18791.19|-462147.25|2541474056133.629883|false|philosophy|victor zipper|2060-05-20 14:26:32|2068-07-18 13:21:40.858914884|2085-03-29 +37|2784|644934949|8143462899383345152|19598.55|-4115146.61|-1524392874934.043945|false||irene hernandez|2047-08-20 09:34:21|2025-03-27 03:58:40.290018987|2096-08-25 +|-19276|605141554|8383159090746204160|45522.67|-4427581.33|-3551912779492.703125|true|linguistics|luke xylophone|2030-07-10 13:00:24|2048-12-11 23:41:59.785955383|2035-02-18 +-32|-24178|1148500740|-8923529803981905920|-36565.06|3529368.28|2380664601539.273438|true|topology|luke thompson|2073-01-23 21:54:44|2017-08-29 15:18:43.706057935|2022-08-28 +|-3813|1992977592|7885697257930588160|40610.97|4059042.75|-614948955390.488770|true|study skills|quinn garcia|2071-12-14 10:05:04|2022-09-08 09:37:06.875565982|1975-07-07 +30|10615|-847235873|9107991000536498176|-9874.16|-4892978.32|-830926430811.871582|true|values clariffication|oscar young|2031-07-31 13:47:34||2060-07-26 +13|-15389|865013617|1409|-45007.46|1282299.74|-3272482088828.603516|true|joggying|fred miller|2070-08-11 19:19:39|2075-09-01 02:16:50.347835856|2078-10-27 +-96|11809|-1644966759||-14002.93|-4469818.97|1917126159337.136719|false|biology|calvin quirinius|2045-03-07 23:39:15|2032-04-12 02:27:12.657670943|2009-05-05 +20|6372|1478365409|-8675892979328212992|38991.14|-2146326.91|-1669697999836.649902|false|joggying|calvin white|2027-05-03 14:40:13|2065-12-20 12:05:02.853602253|2009-06-25 +81|7299|-1126628450|8920344895701393408|-21909.86|-1392050.93|-3312751733215.968750|false|opthamology|sarah hernandez|2015-04-12 
04:15:23|2080-04-01 07:17:45.74683010|2103-02-26 +-61|26540|-968377273|-6974654664348033024|-26186.32|3944430.54|1407237264653.069336|true|zync studies||2049-02-15 00:08:46|2021-07-04 09:11:18.595023146|2043-04-25 +|-13973|1912175355|-8879742387365429248|-24520.99|1718891.92|990917459853.477539|true|yard duty|alice nixon|2022-01-20 13:23:39|2035-08-16 20:58:12.262393960|2095-12-31 +-80|-15299|1318606691|-8938849835283677184|-8804.48|-4963660.13|-4051044335853.006836|false||quinn young|2069-08-28 15:39:57|2031-10-23 14:35:30.465215236|2027-10-23 +-33|-32462|1222935237|-7213775605408178176|9353.59|-2865973.47|-29816572801.699219|false|study skills|gabriella garcia|2073-11-24 15:12:21|2020-10-06 02:54:29.617493343|2018-02-28 +-125|22704|-1096771844|9199741683232399360|28595.90|3955962.84|4910989073004.792969|false|philosophy|ethan van buren|2062-12-30 06:36:02|2054-01-25 08:11:43.748458487|2081-06-29 +-22|-18659|1416850873|8489584373231919104|34567.10|-2690728.31|-4902808539333.527344|false|linguistics|bob polk|2071-07-20 03:43:45|2059-05-11 00:01:57.809402055| +-77|-19912|-340951385|-7344947507044466688|12056.25|-2933320.14|4292999497091.330078|true|philosophy|xavier robinson|2042-03-04 00:33:10|2081-01-29 00:05:29.43863329|2014-02-25 +-101|-13743|-158420748|-8756989568739835904|30210.95|1930694.98|-4980390206441.782227||mathematics|ethan davidson|2075-07-09 02:02:31||2004-11-05 +97|-2152|-1616030844||6923.19|1009414.39|-4803916908307.214844|false|opthamology|irene falkner|2019-05-12 23:35:04|2034-10-21 07:24:20.465428653|2083-09-11 +96|22603|925032386|-8046189486447017984|-8322.37|-3646620.83|3514393344286.617188|false|study skills|david polk|2061-10-25 22:52:55|2047-10-20 20:43:26.170125012|2094-08-30 +70|4491|-1362178985|1287|-45257.20|734363.29|-1819407181812.877441|true|values clariffication|quinn laertes|2015-06-04 07:45:25|2078-03-22 02:23:35.763695497|2012-10-25 +-98|-18140|-835198551|982|39755.57|1372628.14|-1714648472726.310059|false|kindergarten|rachel van buren|2072-04-30 23:06:10|2076-04-03 10:07:19.979852554|2058-01-21 +50|23956|2097519027|615|-24059.46|631764.92|-2495415685760.988770|true|forestry|sarah laertes|2028-12-22 13:28:33|2017-11-07 21:48:52.848530518|1993-12-02 +79|20872|1495575878|-7542857121910046720|-11668.81|-4226007.73|-2260276582420.459473|true|american history|quinn king||2044-03-22 10:07:42.593619051|1970-06-19 +65|-24591|-496870819|1648|-200.89|-3053285.53|-3512079952779.068359|true|mathematics|tom quirinius|2050-08-20 15:12:10|2020-12-29 05:36:09.651652502|2053-03-08 +-68|-29685|-1127100849|-8203008052020879360|37283.21|-1596933.52|4832262437578.585938||geology|oscar white|2048-02-17 07:53:44|2033-11-12 06:12:45.238117579|1973-05-13 +|-3206|-1741895392|-8688153842294595584|-2854.09|-1738342.95|-4557913856473.696289|true||rachel underhill|2077-05-20 02:03:40|2033-08-05 01:49:40.305347119|2027-10-12 +-98|16734|-596963345|-7707867749256445952|-47352.27|-4521296.31|3236784579167.704102|true|education|yuri ovid||2077-01-28 08:26:13.323779922|2093-03-26 +-66|-24368|1832650234|-7894382303337832448|2348.21|46896.52|-888063795956.135254|true|mathematics|alice garcia|2061-05-29 01:02:47|2018-08-11 08:18:59.362818129|2057-03-18 +-5|9716|-499533481|1667|44403.36||2674989060827.001953|false|history|oscar underhill|2055-10-25 00:02:32|2069-02-01 10:23:16.84520652|2053-06-24 +48|-9183|-607285491|8780196485890555904|75.56|-3870004.21|662312551004.902344|true|yard duty|sarah thompson|2076-06-19 08:17:36|2013-11-07 19:36:51.34765657|2045-10-15 
+55|-15109|56316391|-7404057145074712576|2893.22|-4203203.26|-3201041913797.339844|true|kindergarten|sarah polk|2044-09-05 22:10:18||2093-11-05 +-69|-20946||-7433265617153343488|-47857.01|1123959.85|-4751032804232.125000|true|religion|gabriella allen|2046-02-04 08:22:40|2021-11-15 18:22:18.608780184|2069-04-29 +7|-1180|-423378447|1127|-30027.48|-1414107.87|4302554900117.384766|true|zync studies|oscar white|2020-12-23 10:18:38|2050-09-21 09:03:11.214720790|2099-04-24 +11|14376|234452496|-9149719074367946752|41986.01|4484955.64|-1431661810573.672363|false|wind surfing|victor robinson|2077-06-15 17:08:48|2020-07-26 05:43:36.11096510|2083-06-06 +-31|-408|-1933374662|763|5026.31|2220778.49|-1880721142952.310547|true|values clariffication|priscilla ovid|2079-11-27 10:31:13|2078-12-02 09:20:02.449941920|2020-09-15 +-124||206942178|3249|44696.56|-2844962.02||true|industrial engineering|zach miller||2014-01-24 20:41:26.351041828|1972-03-05 +74|-17840|307333276|-7708932208121225216|33649.41|-2209443.33|-3174835614765.178711|false|opthamology|wendy polk|2049-02-15 09:49:07|2021-09-17 10:39:35.795144931|1975-02-26 +35|6015|-1343327|2689|-34762.90|-3352811.58||false|nap time|oscar king|2061-03-02 03:38:49|2054-04-17 12:47:53.423961154|2027-06-04 +14|-20180|720703232|-7881351200983613440|-14311.83|1718525.86|4502617863309.339844|false|xylophone band|oscar garcia|2035-03-09 12:34:23|2019-05-05 02:57:22.203595007|2072-11-12 +-17|26296|344989592|-7515996202498473984|-29958.06|2161214.73|-209259678055.531250|false|philosophy||2068-02-10 07:56:40|2042-06-02 13:15:00.744949321|2074-05-17 +-111|13488|929751599|1681|15142.21|2124532.47|-4344059773498.542969|true|zync studies||2061-05-05 09:51:27|2035-01-03 11:31:13.490764419|2067-09-16 +-119|28064|631711489|-8507279516485566464|46368.28|1381513.63|-3198785922561.029297|false|philosophy|victor johnson|2053-09-16 22:56:54|2064-09-13 02:14:41.203564754|2052-09-23 +-3|-5897|997193329|1316|-36632.56||-4035360861641.272949|false|xylophone band|david johnson|2033-08-30 22:43:18|2021-08-20 07:30:06.17481138|2093-04-19 +22|-22922|626941809|7892026679115554816|-24481.02|4819862.86|3630292090950.355469|true|philosophy|victor ellison|2072-04-15 13:45:41|2069-12-27 19:14:43.4196486|1970-10-03 +|-22534|-1412187081|2279|35125.32|-1204413.83|13572776973.802734|true|history|gabriella robinson|2035-09-28 15:15:33|2079-06-21 21:50:11.269734623|2021-08-31 +8||842283345|873|44738.40|4982540.22|2545702719622.049805|false|chemistry|gabriella underhill|2044-03-23 02:31:44|2063-03-07 17:44:02.4074809|2059-06-02 +96|-5699|-163859725|8142241016679735296|-21581.78|-1551671.29|-2922841514857.416992|true|wind surfing|zach laertes|2014-12-09 06:23:35|2026-11-05 11:43:02.534377887|2023-06-06 +-2|-26128|1513689502|-7811060170911375360|5804.81|4404088.96|-3923411784468.591309|true||bob allen|2075-07-07 20:40:56||2096-05-07 +13|28551|-1568646283|8411494452500930560|-34399.96|2787253.76|-1592892075417.431152|true|topology|rachel brown||2025-05-06 23:15:59.141775309|1993-08-22 +52|40|587206979|2803|-38274.78|-3013975.64|4769880713478.898438|false|joggying|alice zipper||2062-08-30 07:04:29.405219012|2075-12-10 +-77|-13238|-866304147|1247|30977.58|-1455951.83|-379569461462.274414|false|forestry|holly young||2060-02-22 09:39:53.875517714|2054-12-02 +-16|13154|1141303816|-7673901622181953536|32011.03|-2072819.72|2331383540354.394531|true||quinn xylophone|2013-06-15 10:55:00|2066-12-15 17:24:21.774901994|2038-05-15 +-94|-21451|-203416622|268|33257.23|3858254.19|4841902350857.658203|true|geology|nick 
miller|2030-02-09 07:34:05|2061-02-09 17:09:30.792720771|2026-01-05 +32|13150|2142592987||21739.73|443657.37|1617671196062.850586|false|values clariffication||2055-04-27 17:42:09|2035-04-30 09:03:34.834154256|2048-07-11 +85|-1767|-1977762695|-7195217207163166720||2897773.78|4178958976210.437500|false|values clariffication|oscar quirinius|2071-10-09 18:35:28|2039-08-28 00:08:15.223157462|1976-06-27 +-1|-19464|538766635|-9012093603044245504|16783.69|102999.04|-3761561711229.433105|false|yard duty|irene laertes|2039-01-07 08:49:02|2077-12-04 00:37:45.588821251|2044-03-15 +84|7569|1739911574|8920533610804609024|39584.58|-2087360.22|2573136281954.120117|false|quiet hour|ulysses ovid|2058-06-30 06:37:18|2028-03-12 18:17:11.549298490|2073-09-23 +93|-254|1159353899|455|-26164.13|-2925253.46|-3708898652238.446289|false|debate|ulysses polk|2071-12-06 03:25:24|2027-01-09 04:39:00.318567529|2103-06-02 +-77|30619|1370723240|9148071980848742400||2115468.25|-2676483804058.200684||education||2076-06-26 22:59:22|2068-09-02 06:39:15.162172013|1981-02-07 +-76|24512|1316931||43602.63|-4235241.77|-2225959074468.946289|true|study skills|priscilla brown|2069-04-16 14:07:02|2059-05-03 07:04:57.531362893|2040-08-20 +|-17607|-628790799|-7093825013581979648|41798.75|789629.23|4045302222731.130859|true|topology|holly nixon|2034-09-29 04:12:38|2028-09-01 15:46:24.993528329|2040-12-14 +-48|18142|1203482872|1342|-48524.80|1488860.19|1080708792025.255859|false|geology|david underhill|2038-12-12 20:41:41|2054-06-04 10:17:24.350972371|2048-11-22 +-35|-28366|1182646662|7295926343524163584|37147.82|-17339.38|850711115335.975586|true|religion|fred ellison|2056-12-16 18:57:16|2030-08-08 13:20:23.87227693|2061-10-03 +|-11187|2127682701|7874764415950176256|21152.51|-1228307.04|-3933963121027.943359|true|quiet hour|sarah garcia|2061-05-01 14:39:07|2039-11-30 14:02:00.787382842|2063-08-30 +96|-25282|2009215103|8682955459667951616|-30776.76|-951920.19|339774798412.620117||philosophy|bob thompson|2018-01-25 07:52:23||2072-09-01 +75|12802|872554087|8825059717746376704|-23535.33|-3969818.94|-2914303137233.673828|false|wind surfing|jessica xylophone|2024-10-12 09:03:29|2080-11-22 08:10:16.820114627|2023-11-21 +-91|-31335|-1575588203|1651|530.17|-2153555.94|-2020396257019.356934|true|history|calvin zipper|2036-06-05 07:17:08|2077-07-11 08:39:35.486363899|2097-02-12 +123|-25183|69110370|1187|18383.16|-1514304.51|2213587444204.989258|true|undecided|fred ichabod|2027-01-18 08:53:58|2053-06-12 11:25:39.320312625|1998-09-27 +-66|21168|976870621|3197|-21478.88|1822183.11|-2601260542275.482910|false||calvin polk|2030-09-09 10:31:01|2071-07-11 10:12:44.480141278|2040-12-04 +108||-144862954|1115|37537.67|-3636489.74|-2775808925857.248047|true|values clariffication|sarah ovid||2045-02-17 17:37:23.51474731|2048-11-26 +-116|22118|1117805438|8945302550165004288|37582.57||501497065597.860352|true||calvin zipper|2066-05-15 01:52:49|2046-09-19 21:50:44.293786457|1977-03-05 +107|-23153|1571267481|9114850402293882880|-30178.38|1989319.92|826991484976.567383|true|nap time|gabriella underhill|2046-10-28 01:02:40|2045-01-12 05:45:37.921528799|2014-07-18 +97|12327|1603612975|8935252708196999168|16046.26|-67740.16|2793889545913.791992|true|opthamology|gabriella steinbeck|2043-09-23 14:40:09|2078-01-01 15:35:48.152129849|2031-05-27 +39|-16820|43983130|2663|-27389.47|775624.70|-1775814976651.695312|true|chemistry|luke allen|2021-03-30 12:39:17|2043-02-02 21:34:00.252214409|2080-09-28 
+-73|-15866|-99205196|-8535957064499879936|-48198.87|-2414154.16|349441023585.761719|false|study skills|priscilla hernandez|2055-03-14 23:42:22|2027-01-12 03:52:14.47123348|2080-01-02 +70|-25463|748185058|9091082386452684800|48356.67|-3526156.73|1250977516314.754883|true|industrial engineering|alice quirinius|2015-09-13 22:17:45|2072-08-09 11:39:30.710221441|2052-04-27 +45|20124|-470798506|2915|43588.39|-762457.78||true|forestry|wendy carson|2072-06-06 06:46:18|2014-12-01 04:17:26.513106497|2055-12-01 +28|-9196|-1947868215|2183|49404.36|-3414112.11|2109147570161.909180|true|geology||2027-01-27 11:49:05|2061-05-01 06:27:32.791222399|1990-10-25 +14|25381|659397992|-8147405381260345344|47303.15|-3806020.32|997601888271.546875|false|values clariffication|fred brown|2074-06-25 18:50:23|2057-10-25 22:57:33.478783182|2105-09-25 +82|-1367|895763504|-8654797319350927360|1232.14|851336.52|-3820905742244.491211|true|religion|bob steinbeck|2032-12-27 16:52:15|2041-10-19 14:13:28.458891032|2104-03-31 +18|-13466|-343173797|1261|-24504.59|-2671371.43|4817498142971.957031||linguistics|fred ellison|2053-12-16 04:23:07|2021-12-04 13:24:23.478621300| +113|-29988|-213198503|8775009214012456960|29118.01|-2672928.16|3597735313714.380859|false||holly allen|2038-04-29 17:39:45|2035-02-10 07:52:53.120396795|2007-11-09 +126||1844415080|2810|-43710.73|4357967.19|-3835582850357.113281|true|history|holly xylophone|2056-04-30 00:14:07|2037-07-23 02:43:53.584900523|2038-06-02 +126||43672187|9067985867711291392|8608.12|-3919341.08|-1433926422321.541016|false|linguistics|alice johnson|2040-06-19 19:13:38|2032-01-20 11:14:11.710454857| +18|-31663|-1721368386|-8240684139569233920|-41884.25|-4050155.29|-3799361430049.685547||linguistics|wendy brown|2067-02-20 18:47:51|2075-11-01 10:27:41.797075523|1973-08-26 +-115|-20828|-1477897348|1719|19183.72|3667748.43|196163723446.253906|false|philosophy|nick ovid|2038-03-16 23:59:43|2031-04-04 05:56:43.142902096|2094-02-12 +-61|20411|2076370203|7917494645725765632|34656.95|-1033805.16|-2293878051389.661621|false|education|bob thompson|2041-05-09 00:13:16|2064-12-30 17:41:02.427765133|2096-07-09 +100|410|2057486961|3622|49893.07|4038206.70|-434822164725.155273||linguistics|alice laertes|2059-12-30 17:21:06|2067-10-16 22:22:23.909102811|2036-12-30 +68|26697|-1079633326|-8866442231663067136|-43306.72|-616547.34|-892751806585.678711||opthamology|katie robinson|2066-09-01 03:27:00|2059-11-14 17:00:11.431792872|2052-04-27 +6|28146|-309571354|7871554728617025536|10210.31|2684254.99|717298056153.232422|false|nap time|gabriella steinbeck||2079-10-05 02:45:16.165020579|1988-02-01 +-94|-22554|-1114208576|-7380731416973295616||4566778.78|4025279026650.033203|true|xylophone band|jessica hernandez|2060-08-11 01:49:22|2019-01-16 09:25:47.428200312|2038-11-22 +9|11059|1114521964|-8603817012434198528|38309.84|-2121451.24|-1723505323178.535645|true|values clariffication|victor garcia|2076-02-21 16:02:29|2024-12-31 21:24:49.835872918| +-34|10425|-45439614|-7192529627893858304|21468.82|666967.79|-1710357667596.511719|true|history|david ichabod|2051-07-14 18:03:02|2017-02-27 04:57:13.944522320|2050-01-25 +36|31099|-1108723753|-7287583262310350848|-61.04|-2662929.33|3709449843031.081055|false|industrial engineering|luke quirinius|2036-01-13 20:52:17|2062-10-11 17:14:59.629263445|2045-02-24 +113|-22915|-1352545619|7149417430082027520|-6486.11|-3357464.95|-1319020386301.517090|false|chemistry|yuri carson||2051-08-31 14:41:56.890230740|1992-11-06 
+-45|15011|-1061859761|7237310132329488384|-39128.71|319025.53|-1778680361202.553711|true|mathematics|victor nixon|||1998-02-21 +-18|31370|1204834275|-8659643752269242368|39007.50|1149036.85|-2714226684056.946289|true|american history|katie thompson|2027-06-17 03:56:45|2070-08-03 17:42:35.323901256|2099-12-02 +|20969|-469749219|-8051395538179063808|32233.91|2215432.29|-3014965054465.008789|false|wind surfing|calvin underhill|2014-12-17 19:52:13|2022-11-29 17:49:15.855148488|2066-10-20 +-68|-4142|1488440165|1674|-20812.84|253603.39|-2105480401864.810059|false|quiet hour|irene steinbeck|2067-02-13 02:16:25|2022-12-16 07:14:39.23664527|2046-07-15 +-68|27630|1575300276|-8813211231120031744|-14758.39|-1926059.11|-2833925266574.155273||chemistry|ulysses allen||2032-05-08 18:36:19.400564422|1988-01-19 +-106|25835|-533227056|-8103788088118018048|-549.34|4959162.61|4308090638361.007812|true|kindergarten||2073-10-28 10:37:18|2079-07-06 02:03:22.207944103|2028-04-09 +-57|-4726|814544198|7049773031131283456|10513.01|2178130.57|-2416479446500.758789|true|biology|wendy johnson|2020-03-30 21:56:57|2029-12-16 16:34:42.572182352|2097-11-09 +-80|21784||-7035132060308643840|-6419.44|-4383105.22|-2020072615248.374023|true|philosophy|xavier hernandez|2018-09-25 15:18:36|2031-06-28 02:55:11.31963432|2068-08-05 +-78|13795|-1769423338|-7303847963918393344|43895.68|-3084959.79|-4175764579458.167969|true|opthamology|gabriella johnson|2043-10-13 05:10:09|2048-05-19 05:40:47.176953399| +-69|-16027|819069589|-8046238369820344320|-22162.98|-345358.25|674713800507.689453|false|linguistics|zach thompson|2015-10-24 08:01:09|2020-06-08 12:08:21.511690843|2019-10-03 +47||2068018858|-8503573595507761152|-10699.81|-2421607.62|2965768864894.222656|false|undecided|rachel ichabod|2069-06-04 05:16:15|2061-09-03 01:12:25.916465543|2030-10-14 +85|26579|-1371840597|8822384228057604096|-41991.30|-138391.14|1063729386321.732422|false|debate|jessica xylophone|2054-11-01 01:40:23|2069-12-06 00:11:49.576880733|2090-07-08 +110|-9759|-136514115|-7998947380180819968|-17463.46|1862978.01|-3370842772950.304688|true|study skills|irene ovid|2059-11-03 20:21:53|2061-07-03 17:32:49.424965303|2015-10-17 +-26|-10316|-71449585|-7057750467944931328|7153.05|-3062092.43|2726424274449.748047|true|american history|fred brown|2051-03-07 14:50:11|2027-05-29 12:10:20.647953531|2096-04-06 +113|-22215|674547678|1862|-27979.49|2034087.73|1306104776811.629883|false|industrial engineering|tom johnson||2021-03-25 16:24:12.665579887|2101-08-21 +56|8551|-733239404|8583916402383601664|47565.67|-4620642.81|-2470480735156.428223|false|undecided||2027-03-26 08:37:58|2079-04-18 01:15:58.35100799|2056-01-22 +98|-26361|1489169773|2398|42357.85|-3934751.72|-4429578287144.977539|false|mathematics||2048-03-15 04:39:44|2033-11-06 05:22:43.819682351|1986-02-17 +-32|-19330|2068538934|7487538600082554880|7664.98|4132039.93|-2902910424869.702148|false|geology|quinn polk|2041-11-01 21:08:54|2035-01-03 23:06:46.765787876|2071-02-14 +123|-20409|115470151|8677794924343164928|12145.54|-4842892.80|400760479300.927734|true|xylophone band|mike ellison|2080-02-28 20:34:51|2014-02-14 03:41:40.325566176|1989-11-16 +-13|31135|6526476|8708232769657815040|-14997.43|4694403.83|333471522081.652344|true|xylophone band|sarah brown|2021-01-12 23:04:21|2067-12-27 14:26:36.287488601|2062-07-17 +51|-27259|1829544791|-7623359796281999360|-34844.45|-3126581.66|-2421986983728.814453|true|education|tom miller|2044-10-28 16:20:50|2018-07-05 05:25:15.270012699|2071-07-26 
+-95|-17690||8983912573761167360|31719.06|-3442868.07|-1937958357952.531738|false|kindergarten|irene ovid|2058-03-03 15:38:13|2069-02-04 13:07:32.271143352|1983-06-19
+10|-30669|1590744669|-7263060340185194496|-30819.18|1534635.31|4148791909569.630859|false|kindergarten|quinn white|2027-07-01 19:44:37|2056-02-18 15:10:02.841254943|2091-01-11
+93||-971698865|-7873753603299540992|21852.31|-2581251.56|1539283723979.209961|true|quiet hour|bob carson|2026-11-09 05:31:12||
+-28|-18659||-7720966287634112512|20392.44|-2222926.64|1939030156067.396484|false|american history|irene allen|2032-07-18 12:44:48|2026-12-26 16:48:27.776120795|1987-11-12
+-45||-40284975|1806|35653.78|1321025.73|898563412192.885742|true|mathematics||2070-01-12 03:23:36|2034-01-24 11:57:42.385238227|2057-09-03
+82|-13539|1456367662|-8260340354454503424||1093063.49|4557861543446.826172||religion|quinn xylophone|2078-09-07 14:49:27|2025-08-16 09:10:36.887036096|2011-11-10
+125|5469|889772203|2842|38632.60|-4727107.13|2741920281773.531250|false|joggying||2038-11-21 14:39:38|2040-01-28 08:27:19.830854683|2085-04-27
+21|-20517||7069729473166090240|-14418.84||735618454615.902344|true|opthamology|yuri ichabod|2027-06-30 14:53:30|2016-01-03 18:53:34.457261740|1970-04-12
+-96|-25734|-273937943|8688483860094599168|927.24|931681.37|4368902606237.980469|false|debate|sarah steinbeck|2023-10-17 00:33:13|2043-10-07 00:08:12.855001312|2038-03-10
+-82|-23719|-318206520||-23783.97|-837555.18|1762959989546.857422|false|study skills|nick xylophone|2034-11-18 23:41:57|2052-11-30 19:49:07.414111749|2033-11-10
+58|7697|-1437126017|7517159036469575680|-1037.26|-3799864.48|-1963988180877.791504||wind surfing|yuri garcia|2070-10-26 10:29:55|2046-12-29 13:30:24.655176498|2075-11-28
+26|16312||2218|46235.86|-3813446.01|-4186362730051.698242|true|topology|priscilla brown|2055-05-14 21:50:40|2037-10-04 05:24:46.310622445|1998-02-21
+-95|28923|-1769037737|-8996954350906294272|5131.31|-3335839.19|892560184430.806641|false|values clariffication|victor underhill||2051-04-05 17:19:55.107473640|1979-01-11
+89|20014|945683736|2089||2623267.83|-3717730752750.725586|true|debate|gabriella quirinius|2078-08-08 13:57:39|2062-07-26 14:12:36.59165411|1999-03-25
+-52|-12809|-1533934649|-8651641150831362048|48413.99|345690.54|2773050456079.104492|false|linguistics|calvin thompson|2045-08-26 14:34:17|2017-09-25 09:26:40.856875502|1979-12-19
+-107|13792|-1212524805|-8322751250650218496|26905.38|2307830.54|-3744767732184.754883|true|study skills|tom steinbeck|2027-12-07 07:23:38|2030-09-13 21:30:52.282498698|2061-01-04
+-65|21299|-1222897252|1495|-31920.07|-1723567.09|2996766542753.092773|false|wind surfing|zach white|2025-09-09 20:08:36|2066-06-07 03:56:15.873165726|1992-10-17
+101|30222|1667594394|-7848043121524228096|4374.75|1994312.07|4481698153938.253906|false|wind surfing|jessica zipper|2050-04-22 23:54:13|2047-05-21 07:47:57.777923762|1996-11-24
+94|23766|-511198293|-7506254246954500096|20950.55|-748117.50|4939615715200.673828|true|debate|rachel ichabod|2065-05-20 02:43:05|2026-06-12 20:30:30.941431912|2084-07-12
+59|5445|1974939899|7487338208419823616|44886.11|-4889191.55|4911721874172.769531|true|mathematics|gabriella steinbeck|2025-10-07 03:42:36|2045-05-23 22:17:04.9265626|1972-10-12
+-99|-29601|816439627|-7875953567586451456|41944.06|-1349023.71|-1099429516405.692383|true|opthamology|jessica polk|2039-01-01 04:30:38|2076-02-14 07:29:41.551319075|2097-02-19
+99|-3318|-1017027298|1863|24757.93|4444192.30|-3601627653993.517578|true|joggying|victor young|2031-09-16 06:06:37|2050-03-15 17:15:50.541208509|2079-07-14
+-27|-3540|133276416|3835|-506.62|1920067.41|412688526446.700195|false|debate|jessica young|2039-09-26 14:48:34|2077-07-07 08:15:18.365301835|2084-11-07
+-68|11572|25644069|1870|24153.48|4976723.43|-137386195040.676758|true|zync studies|holly falkner|2063-06-20 10:43:27|2024-10-19 14:18:31.573357244|2011-02-18
+89|16250|491016124|-7978782649203228672|-46524.54|1265452.96||false|opthamology|luke allen|2056-12-07 20:53:08|2029-09-05 03:42:32.368459710|2083-12-25
+-16|-19213|1509573831|3960||3292048.84|-240494562170.781250|false|biology|wendy hernandez|2051-02-06 00:15:05|2041-03-28 19:17:32.296675612|2083-07-04
+-56|-6518|-1478812842|-7663293054873812992|-24105.64|-1332349.52|-1890560158574.032715|false|chemistry|sarah robinson|2069-08-29 19:26:05|2046-07-10 19:51:40.969328493|1973-05-29
+-24|28077|-234278308|898|6115.34|652033.34|-4372957414734.699219|false||rachel robinson|2043-06-18 12:08:10|2066-02-25 21:32:14.879009176|1986-03-28
+51|-24313|-938342473|3728|4223.26|-4736632.77|-2080633933255.846680|true|biology|wendy thompson|2019-03-20 15:36:37|2025-06-22 23:33:35.838318924|2083-02-02
+-21|-4539|1469775272|-8615168537390571520|-13079.77|-3313208.22|3092499128010.040039|true|topology|quinn falkner|2073-05-16 23:56:42|2017-07-20 03:37:48.334651957|2055-08-30
+121|29828|-1426893312|3579|4812.14||586167842633.002930|false|wind surfing||2022-12-05 16:40:51||2069-05-27
+-79|-5314|-234758376|8111757081791733760|35336.11|4023549.09|4664860898254.835938|false|mathematics|ethan van buren|2072-06-06 05:31:29|2068-02-13 01:16:52.645583397|2065-07-21
+10|-27295|-1299159155|3111|-40569.04|289098.86|-3359213193119.252930|false|kindergarten|alice ellison|2013-04-01 23:26:31|2042-01-18 04:14:18.953737200|2037-02-15
+36|6892|1090344463|2539|21579.89||-2076008548637.773438||biology|sarah van buren|2044-08-16 16:50:11|2078-11-12 05:33:45.47089985|2054-12-18
+-78|10807|550594651|7497306924248834048|-15217.81|3701631.53|-1010442667526.625488|false|religion|nick garcia|2039-02-22 19:12:26|2058-01-16 13:10:53.407263384|2056-02-09
+-42|26236|-2019287179|2619|13960.36|3980665.59|2745518512550.937500|false|wind surfing|alice young|2049-06-17 08:19:54|2042-10-24 00:38:21.665346021|2001-08-01
+-90|-13424|631954352|1423|43085.76|1692557.60|2335092836258.532227|true|values clariffication|xavier ellison|2028-04-08 19:20:25|2050-10-22 21:57:15.666787161|2100-05-23
+66|-1679|472901914|7652123583449161728|-45141.82|-189883.01|2005119271038.431641|false|education|zach miller||2046-04-05 20:49:51.831050123|2081-01-30
+-61|-19291|936752497|7271786885641666560|6407.13|-2622282.33|-1364893278530.487793|false|philosophy|ethan nixon|2064-03-25 01:50:11|2078-07-14 10:27:03.640728827|2024-09-18
+53|-950||1447|-46967.91|-2338643.42|-3010460855327.999023|false|linguistics|wendy underhill|2061-05-09 13:52:02|2052-06-04 16:42:35.115432702|2058-01-11
+12|-19479|1112783661|2553|-13650.84|-1699487.46|-4635813668758.892578|||nick king|2019-03-23 10:58:33|2071-10-07 13:18:51.554325420|1978-08-21
+-8|26869||8148211378319933440|45519.19|1788716.14||false|values clariffication|fred xylophone|2060-10-30 01:15:38|2071-02-14 08:59:08.385880302|2083-02-05
+0|29498|1393262450|7450416810848313344|-47360.59|1197766.74|1948267796553.970703|false|quiet hour|ulysses johnson|2064-05-23 05:56:44|2064-08-16 10:07:06.357883907|2049-04-05
+37|30056|131031898|2979|16137.50|1448922.16||false|zync studies|calvin allen|2078-08-01 20:45:50|2077-07-22 17:11:31.657495112|2077-05-23
+117||1751468853|-7120456708338688000|-45241.05||4658251999119.685547|true|forestry|tom ellison|2069-06-01 15:56:17|2030-05-31 03:22:11.194126870|2033-08-06
+-120|-31659|-2016985611|7242751359672631296|43128.70|-191280.75|-2180975729787.064453|false|undecided|calvin ovid|2081-01-06 19:08:32|2056-06-12 01:13:12.668973447|2007-12-19
+126|-23667|2111462911|9083076230151864320|11873.69|4280564.29|2689053897530.797852|true|chemistry|katie zipper|2041-07-26 09:46:11|2052-08-09 23:10:15.912587493|2004-10-04
+92|7066|1785455842|-8754992450211692544||356819.61|-319780935492.240234|true|mathematics|ulysses robinson|2069-07-31 07:42:34|2025-12-14 11:43:12.789221272|1989-03-12
+-23|11788|278601840|1537|-3884.62|1500759.88||false|geology|ulysses king|2061-08-15 00:45:19|2052-07-01 09:00:11.170504787|2080-01-24
+120|-27232||7845953007588401152|30784.40|3725404.09|-4223940428806.862793|true|zync studies|yuri polk|2046-03-19 03:26:49|2028-12-07 07:12:17.409326696|2103-03-11
+0|22388|1145627305|9023663198045544448|-16680.30|-4179831.25|-4258847848573.274902|true|linguistics|jessica ellison|2056-08-10 14:54:29|2067-07-03 10:13:58.634487256|2039-05-11
+-102|256|352214248|8900545829211299840|-8142.34|1762753.22|2953930180391.744141|true|zync studies|calvin allen|2053-03-31 14:32:12|2054-01-17 02:39:01.769490002|1989-08-23
+104|-8706|-400501472|1866||-4687152.61|-4538289046733.225586|true|opthamology|fred van buren|2045-10-06 23:55:19|2063-11-06 08:47:29.289513742|2067-08-04
+-74|-22241|-1216166764|-8965578088652095488|11351.19|13624.69|4203637057188.273438|false|topology|nick king|2056-03-27 03:36:35|1970-01-01 00:00:00.158770873|2039-09-02
+-12|-10623|-445353909|154|24563.70|4730542.74|-2976281476329.525391|true|history|quinn davidson|2071-04-24 15:00:40|2016-03-02 22:10:50.657625756|1976-06-03
+8|-32491|-1232183416|2905|-45693.51|3407626.79|-2909647656864.500000|true||sarah underhill|2038-12-19 06:40:41|2070-01-02 18:09:04.499401693|2038-01-27
+41||2090496825|8693036785094565888|449.14|2113679.49|-1419899825383.019531|true|chemistry|wendy polk|2068-07-21 04:20:28|2042-05-24 13:18:57.760909140|2050-11-29
+43|-17366|41063276|3555|-23520.95|-3021088.64|-2720359307901.567383|false|philosophy|wendy brown|2080-12-20 01:36:25|2065-03-06 19:37:09.328344492|2090-05-03
+62|26288||3910|-10665.79|4113207.86|-4165972904406.558594|true|values clariffication|gabriella van buren|2028-01-28 10:55:38|2055-04-02 10:27:11.136514457|2005-08-12
+-76|27523|-191899537|-7792903881635938304|-988.63|2575958.83|134807836030.148438|false|quiet hour|rachel thompson|2064-09-13 12:05:33|2068-07-23 02:24:15.415114292|2006-10-22
+38|-15944|-1141801925|-8395998375405912064|37395.60|-2057132.64|-3432804499574.260254|true|geology|oscar young|2070-12-02 02:21:28|2022-02-22 23:45:54.417929187|2101-12-05
+-9|32017|1367179645|-8503342882470019072|-32948.66|2596690.92|-4465574272510.082031|true|american history|fred falkner|2028-01-06 05:29:29|2034-09-22 22:01:59.220097380|2103-12-30
+-37|8727|523289079|3946|-43342.52|2528654.53|-3334121644829.863770|true|history|gabriella ellison|2039-06-14 08:58:40|2023-05-10 22:56:14.107897590|2058-11-03
+-16|-1749|209430502|2662|-27058.30|1085405.78|-478847345395.791992|false|linguistics|victor falkner|2026-08-15 13:38:26||2101-06-16
+29|20270|-985817478|2848|-17564.28|-1085683.98|1245671080660.350586|true|philosophy|nick ovid|2077-11-22 03:06:05|2018-10-17 23:16:57.22955518|2059-05-05
+47|15266|670667262|3974|48495.59|-4950157.96|2359236945727.296875|true|biology|wendy carson|2062-12-04 16:15:27||1991-07-04
+|-19295|1677197847|8201491077550874624|41623.77|1350136.93|-1226174674712.830078|true|study skills|gabriella ellison|2016-02-17 02:12:04|2043-04-04 00:37:27.881714077|2027-09-11
+|12722|930008274|2386|-2270.46|1951711.24|-2269419016241.016602|true|history|priscilla zipper|2023-03-20 02:05:11|2058-08-15 16:21:32.650056302|2082-06-21
+102||-1112062809|690|-44270.30|-967065.47|4944863867873.259766|false|mathematics|victor nixon|2062-01-19 04:23:24|2034-11-05 03:07:33.846605261|1982-09-16
+-100||1091736925|-8962547695651323904|18808.85|-2597624.19|633233154165.040039|true|study skills|yuri nixon|2027-02-06 11:15:50|2068-11-02 21:11:23.741655982|2094-07-19
+55|-26304|1054864168|2227|-25135.64|-2339553.19||false|mathematics|jessica carson|2018-08-30 01:06:23|2078-11-30 02:36:40.607075532|2013-09-08
+-92|29168|-560322190||22456.65|4966767.72|-4492662222754.084961|true|education|ulysses ellison|2026-06-11 07:20:10|2028-10-07 16:00:16.652808441|
+127|-6294|-41242237||-24339.41|-4580152.68|-4150973486962.570312|true|kindergarten|rachel johnson|2025-04-25 21:40:13|2064-06-21 01:47:28.67945256|2026-04-28
+-104|-25115|824836988|-7020852530219171840|2631.26|-1696924.47|2636281196281.515625|true|religion|ethan king|2064-04-06 11:56:42|2079-03-23 13:15:05.713728857|2012-06-08
+82||373031319|6967631925774639104||-3338254.44|500376662732.691406|false|geology|alice johnson|2046-06-05 16:50:34|2033-10-31 22:49:14.847949456|1990-11-23
+-6|-30482|1275228381|8145750910080745472|-32038.63|-2954981.27|2949286209907.953125|false|linguistics|david young|2035-10-06 02:29:21|2031-02-11 07:57:23.673629394|2105-04-02
+113|-23323|-839512271|8994608999945125888|-28055.16|1643018.67||false|education|priscilla steinbeck|2054-10-12 02:43:21|2027-05-27 09:57:29.960419451|2005-01-23
+15|-7024|923353533|3940|7872.92|-2081172.60|-2466850727047.962402|true|wind surfing|ethan king|2027-10-04 14:01:27|2049-07-15 07:58:03.472552275|1988-08-26
+|-6283|-311437801|-8990843030306717696|-41276.81|1722079.14|2070485805872.195312|true|wind surfing|nick miller|2022-01-16 10:36:26|2029-06-16 15:44:00.263282479|1993-07-11
+25|6980|-1892816721|-8182421179156905984|32052.05|2697304.50|-2452231930319.044434|true|mathematics|sarah van buren|2057-11-28 06:45:32|2065-11-02 04:49:45.252309410|1984-05-26
+-25|26744|-1489628668|-7878145001776152576|38864.72|-543066.51|-3989748992325.772461|true|philosophy|tom quirinius|2027-08-03 17:06:47|2071-12-24 20:56:38.549176629|2093-04-14
+-122|25038|100270148|9096395849845194752|16126.36|-625257.45|-3447447963466.885254|true|opthamology|victor ellison|2036-07-06 17:42:38|2023-05-13 17:55:44.745449578|2082-09-17
+69|-16570|-1870912732|-7199983995864711168|-11501.29|-2355822.95|4784415058876.519531|false|american history|holly hernandez|2074-11-26 21:56:49|2076-02-03 03:26:47.100157028|1999-06-03
+-12|14213|||46313.84|-3912157.22|2214476493452.331055|true|linguistics|luke allen|2052-11-30 04:15:17|2068-10-27 11:57:03.945913098|2006-05-04
+56||658008867|7868367829080506368|-34336.86|-467709.59|2729407234427.085938|false|undecided|alice hernandez|2058-12-23 05:19:29|2038-03-11 15:26:58.124711805|2064-06-26
+-30|31795|217476429|2001||-140021.64|2811318250004.752930|false|chemistry|jessica thompson|2040-02-02 16:00:13|2035-08-19 22:22:12.933201371|2082-12-04
+96|9264|-1057522129|-7429331808102899712|10703.15|-1686203.10|-363688684224.463867|true|kindergarten|ethan quirinius|2081-02-04 05:16:49||
+-70|23063|-1511162508|8928133990107881472|22991.39|-2823357.23|-786567683392.114258|false|debate|calvin van buren|2054-10-09 00:55:52|2053-06-14 18:30:38.792560125|2077-10-29
+106|2326|2080412555|-9175279464813223936||3763969.37|-3478279974550.986328||forestry|fred van buren|2079-08-05 12:51:53|2025-05-02 08:19:30.267343837|2007-10-02
+58|4905|596802082|3467|48922.94|-2427646.65|-1956656626084.800781|true|religion|david brown|2059-08-27 02:37:38|2074-02-26 19:05:09.130777914|2104-03-09
+67|-31596||2692|23665.84|-2517144.39|-3266719980073.062500|false|debate|nick xylophone|2018-03-26 01:09:51|2048-01-16 05:58:57.246574643|2053-10-18
+-24|-13948|1063524922|383|-23278.06|961810.86|3461107192485.286133|true|opthamology||2063-12-10 23:49:15|2014-04-14 19:52:11.36942801|2023-10-23
+-51|-19413|-120704505|2180|7673.97|3840971.12|1717287804088.129883|false|philosophy|luke king|2021-11-07 16:27:24||1974-07-19
+8|-12564|172075892|7299197687217856512|-43842.34|-3395074.10|-4225250387427.301758|true|forestry|sarah nixon|2053-04-02 00:45:22|2071-09-12 07:22:30.450556241|
+-107|19917|-1945738830|8295110846998233088|-28431.96|-4322868.81|786529992285.522461|true|values clariffication|sarah carson|2067-08-06 19:51:59|2058-08-28 05:24:49.32302645|2042-12-12
+-8|27787|1895282160|8487573502287478784|19758.36|-4910924.30|-4340220649412.813477|false|chemistry|wendy van buren|2048-05-30 14:41:37|2021-12-22 11:40:34.570668347|2002-02-21
+-43|-14055|-1283465451|3911|-34319.73|-1413420.27||true|education|xavier quirinius|2051-02-05 11:30:07|2035-06-24 01:08:21.175238432|2063-11-21
+-79|-11393|936133387|-9117959922369060864|41437.58|1156422.75|2370005565831.085938||opthamology|gabriella van buren|2063-01-25 22:56:53|2026-06-13 16:13:15.739664221|2025-10-17
+23|-9368|-1212433954|3255|29064.07|-3454874.42|-4178431684249.020020|true|values clariffication|mike underhill|2048-01-22 12:12:53|2078-10-25 09:35:47.422665226|1976-07-11
+-3|29992|284646137|-8797972842900307968|2832.96|866000.62|2948618846342.725586|true|forestry|alice van buren|2053-08-08 14:24:11|2043-05-05 17:25:01.599668995|
+-44|5542|1194089079|1065|-43883.09|-3705656.11|2618341807790.152344||yard duty|nick falkner|2048-08-07 23:37:11|2065-01-12 07:32:17.848708633|2089-05-10
+50|-17236|-1248781172|9188173682239275008|-932.46|4283902.51||false|linguistics|calvin quirinius|2066-01-02 06:15:34||2038-11-21
+-107|17373|-1442424087|-8623238306523824128|5552.38|-3458132.26|921960196859.856445|false||luke ichabod|2034-12-14 15:18:02|2059-03-22 11:58:34.314644363|1980-07-19
+-73|-9102|266601601|908|41081.46|2657372.15|-837837614763.010742|false|opthamology|irene quirinius|2028-03-21 14:39:00|2078-05-11 17:58:05.60588994|2059-10-02
+9|32212|-423190290|-7262798781688651776|38636.43|-4573720.06|2536259966692.461914|true|xylophone band|nick underhill|2031-05-15 12:47:25|2047-09-13 08:04:34.722807010|1984-11-19
+23||-442732016|8333523087360901120|14531.11|69068.15|-2057600166422.162109|true|study skills||2020-08-30 17:51:25|1970-01-01 00:00:00.454328290|1974-03-30
+12|-8321|-1421396891|8454154705460666368|-5227.53|2886887.00|4349398966437.156250||education|irene zipper|2041-10-24 12:28:05|2073-03-11 08:14:09.756114070|1994-11-10
+-98|-30244|106847364|8457906374051020800|-12308.81|1643233.89|-1862982340067.276367|false|wind surfing|luke garcia|2048-08-27 15:36:59|2022-05-09 11:11:32.853100609|2045-03-09
+69|-3405|488014426|73|-28346.79|2731540.33|2267354345396.413086|true|kindergarten|holly thompson|2064-10-26 11:39:58|2077-10-16 03:10:28.888155713|2035-12-24
+-83|-6513|587797446|8213810702473183232|30050.83|102694.59|-2847724175722.277344|true|zync studies|priscilla ellison|2016-10-03 17:44:33|2056-09-08 06:04:49.641349132|2043-11-26
+75|13522|-1517536924|9054887854393950208|15141.54|1755041.65|-3270453275783.538086|true|education|ulysses ellison|2035-12-08 01:16:53|2070-09-27 08:40:30.393760546|1974-10-03
+-38|-26054|407233168|2514|36614.21|-452732.25||true|zync studies|katie young|2064-11-29 10:35:19|2024-11-14 20:32:43.808212142|2046-07-17
+-28|-4819|1626868156|7735566678126616576|48879.36|1239004.36|-4552439178204.146484|true|education|tom garcia|2019-06-18 18:51:27|2047-06-10 16:32:44.60775478|1998-02-12
+-13|17132|-521886983|695|1944.22|3215015.14|1864369948070.994141|false|education|tom underhill|2060-10-16 03:03:26|2015-04-01 01:05:02.15569899|2070-05-11
+-104|20428|-409673169|820|-9991.69|-2396374.79|1282492179181.637695|false|religion|fred johnson|2017-09-14 06:45:42|2040-12-25 21:39:47.181668340|2070-01-06
+89|12045|1818213677|-7888051992910274560|-9384.53|1975057.51|-1651565773188.927734|true|nap time|xavier ichabod|2050-12-16 14:41:20|2017-08-23 07:23:58.648295477|2022-12-28
+-94|-1613|-1141652793|2563|-5069.95|-1685735.78|1255561990305.942383|false|zync studies|sarah ovid|2048-07-16 21:05:37|2013-04-11 09:25:14.135644374|2076-06-13
+-51||536235636|481|-3326.67|2015657.33||true|undecided|jessica garcia|2030-07-21 20:47:01|2015-02-27 14:42:24.541863875|1999-05-25
+71|-11066|-240529113|9085381906890203136||3623705.69||false|wind surfing|alice king|2023-01-12 14:43:32||2103-05-19
+43|9469|1978171687|8069531888205086720|-30700.99|4531545.89|-4036385352684.703613|false|quiet hour|alice johnson|2072-02-07 18:51:58|2016-04-14 13:33:39.51109536|1995-02-22
+18|-27139|-1909635960|3901|-22165.89|2438006.35|-2457198289952.842285|true|yard duty|priscilla quirinius|2044-02-01 21:07:57|2017-02-24 23:15:38.259931793|
+-85|27691|-812431220||-27884.22|-3932474.77|-4301383379646.672852|false|yard duty|holly robinson|2067-12-31 02:16:45|2059-06-14 14:29:47.306981730|2042-06-23
+4|15070|-1048181367|2138|19727.66|-2300068.46|442791232447.667969|false|chemistry|holly robinson|2044-08-04 16:37:05|2044-06-30 10:22:10.259564256|2035-07-29
+42|29721|-534991774|8547243497773457408|-14403.81|1809670.64|3049933582700.781250|true|topology|wendy falkner|2052-03-20 11:57:10|2014-09-08 03:13:41.230670338|2031-09-03
+124|-21833|-1146055387|1914|-16711.13|-1187994.93|-2129923795016.388672|false|chemistry|quinn van buren|2018-05-04 18:02:39|2075-04-11 15:14:21.144748744|2052-02-29
+-52|-20972|-207546600|3462|-22932.21|2274233.99|-1515421803774.650391|false|zync studies|jessica van buren|2055-06-26 09:30:31|2053-04-14 14:07:43.346589567|2040-12-16
+-77|-9494|-1244527286|8631515095562887168|-15537.74|2717635.27|-1618779636424.356934|true|opthamology|bob carson|2022-07-29 22:39:55|2064-12-15 10:25:10.820697739|2012-02-29
+-32|-18151|-20660936|1537|-24087.33|1569221.06|3494016048787.180664|true|religion|david hernandez|2049-03-23 23:12:47|2069-09-06 13:34:29.824569836|1980-09-13
+107|-8241|-1914210382|1032|-494.69|-491580.98|2215415160315.962891|true|wind surfing|rachel steinbeck|2053-09-21 15:24:34|2051-05-31 23:41:33.238902529|2068-11-08
+5|26134|-76430653|-7159700138947862528|-30556.11|-2389481.88|-4148726763102.850098|false|religion|nick robinson|2060-03-21 10:52:41||2054-06-24
+49|27697|-442839889|-7904188195431661568|-34305.21|4500676.42|1725352215904.507812|false|opthamology|sarah davidson|2080-06-15 07:39:44|2080-08-05 07:34:25.775673011|2042-08-24
+-52|7076|1129173487|3071||-900445.66|-3853543995793.309570|true|undecided|luke johnson|2020-09-26 09:44:59|2062-10-24 23:11:30.284403277|2079-12-07
+-43|13595|1042184256|-7571957778022178816|-46981.86||-3145776824458.792480|true|undecided|bob miller|2018-01-09 03:24:56|2030-10-07 14:46:23.841810028|2095-11-19
+29|27796|-851663638|-7220731681653604352|-17833.51|-333198.78|3337165730064.981445|true|study skills|gabriella robinson|2061-07-17 12:49:22|2069-08-24 15:51:48.964464544|2021-02-24
+-19|16950|1127080164|3159|7425.51|-127812.51|-3947313690509.517578|true|mathematics|ulysses carson|2037-03-26 18:40:05|2023-07-14 00:34:28.67758177|2089-03-14
+-6|-23124|-397951021|7843804446688264192|-30463.53|-4069350.60|-2020073902236.276855|false|nap time|katie falkner|2077-05-02 02:07:14|2037-06-27 14:47:44.422604596|2095-06-27
+0|15923|149701884|523|12737.25||1248482752888.797852|false|biology|oscar thompson|2040-07-12 19:39:20|2026-11-18 08:27:34.373542448|2073-01-05
+-70|-695|-1343425152|-7461750143936897024|-32157.17|-2176119.65|-2925113732619.415527|true|religion|wendy xylophone|2076-02-16 19:05:59|2027-10-14 17:29:43.583602554|2097-05-23
+-97|6197|-36682325|-7827420207675105280|-31750.91|-3074549.26|3788972005494.990234|true|biology|fred laertes|2067-05-10 20:20:51|1970-01-01 00:00:00.101484065|2064-02-24
+-10|-20262|-600315936|1345|21913.08|-3070804.01|2962412389913.353516|false||priscilla garcia|2048-07-10 23:38:52|2044-08-05 16:00:46.315151651|2055-07-05
+|-19738|-101960322|-8572823448513445888|25956.38|2092539.40|-2179647850269.006348|true|linguistics|ethan robinson|2057-03-13 06:55:29|2048-01-09 16:37:38.915920932|1995-02-27
+14|15279|1141595012|6947488599548215296|34978.19|-2562363.19|-2815162364953.494141|false|history|xavier steinbeck||2064-09-27 12:29:57.968356596|2056-10-05
+3|-19833|-1484787952|2933|8675.74|978264.91|-1636821663320.553223|true|chemistry|wendy davidson|2040-12-06 19:52:39|2035-12-26 10:35:59.360110196|1981-06-20
+51|-9883|-1454941039|2026|-35426.60|2272684.71|4704286241455.583984|true|forestry|victor underhill|2058-05-13 21:49:48|2068-07-03 13:08:18.155351313|2046-04-14
+122|-31711|-1205034356|-7079898537463537664|-32125.03|-3286002.29|-2908250839324.075684|true|kindergarten||2038-06-26 23:27:39||2070-09-12
+-100|2376|-1538558250|3002|-15010.17|2181186.45|-1509750359093.331543|true|geology|zach young|2021-05-20 14:20:15|2060-12-13 15:31:30.592604154|2073-10-30
+-40|18675|1772545157|3478|4792.97|92850.82|-762670982733.766113|true|philosophy|gabriella laertes|2032-12-22 12:49:50|2071-12-26 20:13:36.328951995|2105-09-04
+-21|15628|-1288198020|91|25691.45|-738032.31|4949606816450.326172|true|geology|luke zipper||2035-10-06 11:14:13.580921847|2053-04-30
+-91|30585|1718167702|-7106210529681350656|24184.47|-4575482.70|-2159863209435.393066|false|chemistry|quinn king|2039-08-20 03:28:40|2068-08-19 10:10:12.74241996|2098-11-20
+98|-22689|776459017|7898670840507031552|-30842.33|1306045.70|-1435567330861.893066|false|linguistics|wendy steinbeck|2020-07-10 23:48:50|2051-04-27 11:27:17.997877424|1997-03-24
+46|-31765|991397535|1280|43342.36|-4767218.51|3617816889074.596680|false|chemistry|luke nixon|2065-03-17 14:37:19|2018-03-21 13:28:07.336943140|2049-03-24
+102||-688296901|7375521127126089728|-28757.68|-2082734.07|777177079642.256836|true|education|bob zipper|2036-07-24 19:19:09|2031-03-07 06:49:22.785267673|1993-03-03
+-31|17958|2029657999|-7744462446680375296|-41109.39|1191929.26|3897628529217.414062|true|quiet hour|mike ichabod|2040-12-31 06:10:58|2058-02-07 15:58:19.354601408|2059-01-15
+-6|13632|160290374|-7296096276653391872|36672.60|-2482472.61|-339458377166.010742|true|study skills|tom hernandez|2025-08-25 10:25:27|2058-10-19 11:12:56.527770967|2044-10-09
+8|-10872|1701761102|-8426531414463545344|32156.69|-3405255.27|-1310101119229.685059|false|values clariffication||2065-02-15 04:14:25|2029-07-16 16:45:16.991648242|2085-05-14
+59|26338|334208532|7514552840617558016|-34440.76|-4404420.91|3330304712948.831055|true|history|bob zipper|2040-05-28 12:09:21|2066-01-08 14:32:11.142741991|2066-09-02
+-21|-15348|-1458382451|-7003696402314215424|43210.70|149052.90|2674338617144.656250|false|yard duty|gabriella brown|2038-10-10 04:17:31|2030-01-04 00:55:23.244799844|1978-07-14
+97|||3456|-38913.18|-4379808.95|363891694968.999023|true|religion|nick allen|2036-06-18 18:23:02|2025-03-15 21:36:35.53561383|2071-07-28
+-59|-6927|-553349593|7621013099259527168|16473.64|-1800225.01|4614724388696.132812|false|geology|alice ovid|2059-04-04 14:47:41|2019-12-24 06:47:20.279929841|2012-01-10
+-52|32124|1888675011|687|13272.28|-2208934.98|-883719278816.102051||study skills|wendy white|2039-05-03 14:20:11|2074-02-04 03:52:12.802260928|2067-08-18
+-98|4809|363981930|2046|-20324.46|4687322.86|3071409665623.399414|false|history||2036-06-02 00:13:09|2066-12-04 17:35:54.613167337|2029-07-06
+-5|7307|-887663189|2412|-49469.35|-2643670.18|691967262474.229492|true|study skills|katie laertes|2015-02-17 10:46:41|2061-06-21 02:39:42.676579556|2036-11-05
+4|-13402|493977568|2803|-1748.73|-3393754.92|-4457211557691.458008||religion|nick polk|2069-06-02 17:34:24|2047-12-25 15:11:53.122432132|2034-03-01
+120|-18214|39723411|-8340523561480437760|-14546.88|-1333507.95|-4486126507516.558594|true|opthamology|bob ellison||2031-10-03 23:35:33.544613811|2094-11-24
+-95|16766|-695775663||-25943.48|-370627.31|56131754853.687500||american history|yuri king|2061-12-26 16:50:46|2066-04-06 09:14:45.591410912|2052-03-12
+57|7678|1500437122|3690|-36458.18|-4405751.72|2469097005156.158203|false|history|bob laertes|2033-05-08 06:57:11|2045-03-02 09:49:45.919117020|1995-05-29
+46|-4910||958|-41418.86|-2450287.31|-173454179858.830078|true|kindergarten|david young|2048-10-13 22:24:03|2033-02-28 07:17:26.103855489|2015-03-27
+12|27077|-310584775|8160662610166194176|-21832.81|1737444.46|-2523815566161.111816||forestry|nick van buren|2051-04-20 02:22:57|2052-12-16 14:54:40.802310365|2010-06-06
+5|-6611|-348628614|7376467688511455232|-17302.69|-4043937.62|2997674791460.696289|false|biology|katie johnson|2074-10-20 08:39:25|2077-07-10 17:11:48.376645616|2006-05-02
+-6|12256|1342923026|-6951350560260784128|-19554.04|-4459872.13|2843148363140.599609|false|wind surfing|rachel laertes|2061-09-16 16:07:08|2039-10-26 13:59:43.783210729|2038-03-28
+58|-25176|-575513309|-8856151919723003904|34314.07|-2099475.91|2897602401976.086914||geology|ulysses ichabod|2062-11-26 01:45:39|2061-12-11 04:52:43.520505413|2068-05-04
+-125|20784|1602631923|2214|39655.33|3039475.62|-3830282459603.928711|true|education|sarah laertes|2076-12-02 21:21:47|2055-12-14 05:18:38.433511936|2054-07-22
+-5|-26284||3534|-32835.86|1787616.77|4409529967528.058594|false|||2025-05-26 05:36:16|2013-05-08 02:45:24.84979575|2012-07-25
+-95||1594107168|2485||-4550897.43|3236841723287.291016|false|forestry|jessica underhill|2062-05-04 06:51:36|2042-12-04 11:51:22.932128563|1998-07-23
+58|-2254|1924741890|-7616522969329262592|40024.13|-2186024.00|-4061500682219.793945|true|topology|jessica quirinius|2046-05-07 13:28:22|2070-04-12 09:09:49.449031915|2084-05-27
+-24|-28647|-1867014618||-4343.38|4997627.14|-1035879213003.773926|true|education|nick zipper|2048-01-16 14:08:49|2072-05-28 11:18:15.805789959|1997-07-21
+21|21081|-1754203978|8004633750273925120||-3602500.25|4760213945363.105469|false|wind surfing|katie miller|2019-05-04 21:41:54|2061-04-11 04:59:35.856248374|1986-05-27
+14|12187|127917714|-8136227554401107968|49429.89|1910729.25|-1069914497388.992676|true|geology||2016-07-17 16:11:05|2020-09-09 09:11:46.601664066|2014-12-10
+118|16105|626251612|1728|-49470.06|3959897.75|-1478106202190.900879|false|xylophone band|nick young|2062-12-10 18:23:18|2051-07-27 17:32:27.385189293|2039-12-24
+-3|-28566|-1745449855|8519937082746634240|32030.87|4816427.45|1633700536419.055664|false|debate||2023-04-17 07:06:24|2058-09-20 02:59:11.375577284|2071-06-10
+-31|-22701|-893863493|681|-36544.43|-2889425.01|1301701521955.678711|false|mathematics|zach miller|2069-09-04 21:21:15|2077-08-09 09:40:16.725515779|1986-09-25
+-44|17014|1305668933|2675|-37536.84|-3389079.95|2568172106802.393555||opthamology|calvin allen|2048-01-07 02:24:52|2052-10-30 09:51:14.703802238|1991-07-21
+123|25603|-1319686435|2140|28726.13|-2052209.10|-2437073639876.423828|false|philosophy|jessica steinbeck|2035-10-21 16:25:43|2047-06-17 10:53:28.279336352|2025-01-07
+-30|-12239|-406264741|8048726769133592576|-33901.62|-3283923.82|-235863245443.930664|false|opthamology|mike robinson|2055-06-28 05:28:20|2039-12-20 02:45:21.514659414|1991-03-26
+64|-15064|-1822850051|7368920486374989824|4239.63|-4074561.40|-2472225769522.573730|true||calvin ovid|2052-03-17 08:41:37|2073-01-09 16:54:02.852079048|2105-04-24
+30|32070|-946349935||49143.26|-4093628.93|1756513093301.918945|false|biology|oscar white|2043-11-23 04:49:54|2062-01-19 06:23:16.207886115|2024-10-03
+-38|31923|-1674623501|2933|-43216.48|-441694.88|2172423919601.769531|true||gabriella ichabod|2068-03-28 20:46:15|2021-05-28 21:18:06.529746759|
+|4416|-1423467446|3702|34593.32|-4983663.43|1472966620092.500000|true|mathematics|mike quirinius|2058-11-02 02:19:38|2078-07-10 22:26:27.598943854|2012-07-07
+35|1350|-1439424023|7473537548003352576||-4250668.27|3589868423808.226562|true|education|zach white|2028-03-21 06:07:14|2026-09-22 16:15:50.649708942|2023-12-03
+-100|820|-175727228|7662037650719850496|48160.03|451337.11||true|mathematics|fred ovid|2043-07-25 21:47:47|2025-03-22 08:39:42.572224315|2071-11-21
+67|4111|-1356601829|-7201085131997011968|-4845.62|-2391963.29|-3494275625280.529297|false|linguistics|tom robinson|2013-12-16 15:23:28|2063-01-24 14:36:15.377716525|2008-05-23
+58|-13525|-1668974292|2461|40132.58|-3648308.76|3151275044544.697266|false|quiet hour|bob falkner||2027-07-24 22:57:02.997506179|
+-101|-32533|2013178181|-7078068944081002496|-11491.23|1547819.39|1269539510349.381836|false|geology|tom laertes|2046-11-14 17:39:54|2075-02-01 10:01:02.218541617|2060-09-24
+-42|4948|-1216206795|-9213132862973829120|-8676.33|-1161986.52|139544644036.498047|true|religion|xavier young|2052-06-25 02:29:10|2027-11-08 06:13:50.441308738|2035-07-20
+-19|-29228|1851805558|-7442593976514420736|-19417.85|-4185578.77|-3473633863694.054688|false|biology|ulysses white|2058-12-02 13:43:58|2052-03-14 06:31:36.71560104|2073-07-26
+-85|30237|-896274896|947|-41977.40|-4762084.02|3689572984317.202148|false|debate|zach johnson|2072-05-17 19:39:38|2049-04-29 02:45:41.698253842|2071-04-22
+67|-30263|1569269522|2715|-35994.04|-2615857.37|-4704488446544.807617|false|philosophy|ethan xylophone|2066-09-28 13:46:21|2059-11-18 07:16:04.765091082|
+-78|-670|-1800413845|7347732772348870656|23004.69|-2233285.92|3739991371485.302734|false|topology|katie hernandez|2051-06-30 10:48:09|2074-04-24 22:45:21.879912960|2024-05-17
+-117|-17297|-1700451326|-8269917980278980608|-17116.65|4912402.10|-3493298815033.440918|false|debate|rachel ichabod|2039-11-24 10:32:35|2077-04-20 18:53:37.535558168|2053-09-09
+-11|23834|672919099|8714829359200747520||3308709.86|-1865156303050.916504|true|american history||2043-04-09 06:06:05|2051-09-30 09:37:51.898928323|2047-10-06
+-54|18071|107941738|8555948987770511360|-28327.38|2324538.32|2847074667998.903320|false|zync studies|wendy nixon||2070-06-25 13:30:35.811325368|2009-04-29
+57|-2827|1450881368|8183233196086214656|-3116.94|4176505.10|765829561496.076172|false|joggying|david robinson|2050-10-24 05:56:26|2061-12-12 04:36:18.893986674|2044-11-13
+123|-23798|908943372||-10047.94|863930.25|1612915693140.735352|false|joggying|fred ovid|2017-12-19 09:17:31|2037-07-21 00:38:21.576873355|2066-02-20
+|-16998|2089198703|6933001829416034304|-33836.46||2102240941480.175781|true|kindergarten|victor ichabod|2064-05-08 21:44:42|2062-01-04 22:58:04.558120645|1982-01-17
+-82|-19048|-1534238977||-23411.14|302494.42||true|debate|katie steinbeck|2060-02-01 20:33:26|2013-03-11 05:00:41.593133945|2093-04-26
+-22|-14675|1081187102|-7540104552219860992|-38953.95|-2362183.98|-1224716349369.181641||industrial engineering|oscar brown|2070-08-13 00:47:56|2066-04-19 15:19:12.320935118|2056-05-15
+-66|17165|156101201|-8368487814665895936|-14140.57|379289.77|1968339169913.875000||american history|katie white|2016-07-16 02:18:55|2042-01-07 04:12:18.268550669|2075-12-20
+120|30166|-357680544|919|-2327.72|-2232224.69|-2115836017507.852539|false|quiet hour|holly white|2021-12-27 01:11:58|2051-02-06 17:57:10.373087732|2082-12-20
+-85||-1992388855||-7366.24|3317647.94|3329868012355.703125|false|nap time|jessica hernandez|2023-04-03 12:56:56|2065-01-24 10:49:07.453432315|1979-12-06
+75|31892|-423945469|3860|-42068.14|2721009.80|-691405603446.639648|false|chemistry|fred brown|2014-07-09 20:18:00|2020-09-15 10:20:06.356886049|2045-09-14
+124|9165|601376532|-9210275791460499456|21504.67|-3603542.89|585382282613.906250|false|values clariffication|calvin miller|2045-05-01 07:46:50|2050-01-16 02:26:01.756278854|2008-03-13
+72|6651|1394370866|2179|-42864.93|2623099.23|127034475536.475586|true|geology|mike carson|2046-07-04 08:48:22|2041-06-08 14:08:48.775940626|1970-09-11
+-82|-10326|196581473|-7040248820505149440|-5351.98|4761488.03|3838487433435.275391|true|biology|ethan johnson|2053-05-17 23:29:12|2053-03-08 00:23:08.194947839|2031-07-04
+109|25118|1765173148|1436|-13638.66|2650629.19|102372940548.889648|false||yuri polk|2073-08-29 12:34:55|2014-12-10 00:21:02.20416566|2010-01-02
+-4|-21281|-139448716|3789|20654.45|2015350.99|-4799397192234.416016|true|linguistics|irene brown|2023-11-19 08:45:47|2048-06-07 06:42:10.786761109|2023-12-28
+106|-32180|-621365995|-8161047750470279168|40550.10|-4846704.32|-2370210197103.510742|false|study skills|ethan johnson|2072-09-28 04:45:00|2041-10-25 04:03:47.934953825|2054-06-20
+64|21103|-1609864597|-8084716955963252736|-49797.47|-2035643.95|-3334485644733.611328||yard duty|ethan hernandez|2019-03-27 18:13:00|2050-07-29 02:49:32.410064002|1985-12-07
+50|15589|-1603071732|3874|22817.88|331598.89|-4244238246714.146973|false|history|calvin nixon|2023-08-20 05:50:25|2022-05-02 03:46:09.440109913|2084-10-14
+-15|6493||-8658387566611996672|46136.01|445923.58|3158702377742.095703|true|joggying|mike garcia|2043-07-07 06:16:30|2056-09-01 07:34:58.429893271|2085-01-07
+-30||-1045771991|-8789178184387641344|37527.62|-4207911.34|-1780358853070.538086|false|american history|yuri king|2025-07-21 20:28:30|2055-10-07 16:55:33.198425093|
+40|26664|323817967|8129551357032259584|38950.86|436144.95|2865228012676.333008|true|xylophone band|yuri king|2045-06-06 17:15:59|2050-10-04 12:19:24.850508554|2079-11-18
+68|6379|107680423|8455496814886002688|16783.75|-1796865.52|3914636681057.181641|true|wind surfing|ulysses ellison|2018-04-19 06:54:02|2046-12-01 13:33:44.835934730|
+-42|-29864|-1765795567|-7611584069753552896|1400.23|-2198848.86|-4142081795402.248047|true|values clariffication||2078-03-21 16:12:11|2062-12-21 12:39:08.433755127|2002-11-27
+-98|-5374|652118640|9005866015985713152|10952.25|-778975.29|-2123680177187.878906|true|chemistry|holly ellison|2035-10-24 22:37:40|2047-01-30 12:08:45.733801194|2064-06-23
+-82|16082|703111607|-8191825921746305024|29830.11|-2140181.35|413651702499.820312|true|yard duty|nick young||2042-05-25 13:16:01.793578638|2038-01-30
+48|-19357|-327648289|-8719510423723155456|-15826.08|-1399170.73|-1177519032425.173828|true|opthamology|rachel robinson|2014-12-06 03:21:19|2017-10-22 00:02:11.871683|1980-07-14
+117|-28489|29680001|120|-7326.78|-2940796.64|795619083684.435547|false|biology|david thompson|2071-08-07 12:38:00|2029-01-27 18:45:19.757818985|1985-05-28
+84|-14445|955267058|1398|-39118.00|-252708.89|2936056124736.019531|false|xylophone band|quinn carson|2034-06-11 16:49:32|2044-02-16 22:34:53.422119076|1971-05-12
+-22|15176|-1719427168|7204802700490858496|23869.73|-3566161.55|-1108670696092.311035|true|study skills|mike ichabod|2038-07-28 03:06:50|2071-07-28 07:26:48.536104572|2093-01-08
+115|32640||1641|-24629.67|805640.61|2361264049177.335938|false|quiet hour|mike garcia|2062-08-15 18:14:40|2039-10-02 01:48:36.441956539|2028-08-27
+4|16565||9020143715350814720|30989.11|62250.03|-3775876617082.865234|true|geology|mike ichabod|2058-09-10 22:40:14|2045-02-09 04:57:27.469371231|1986-01-12
+107|19968||8391407951622815744|-16291.50|-2197796.27||true|mathematics|jessica carson||2018-10-18 23:42:07.746990788|2098-01-08
+8|30839|2124297747|-8088337436168830976|-2530.91|4326384.64|-3930030546396.352539|true|philosophy|ethan carson|2053-03-31 02:49:11|2077-02-14 19:32:48.817078807|2044-12-27
+-51|23417|-561932449|-9002912355472736256||-3494510.74|193686605544.023438|true|forestry|yuri ovid|2072-01-20 06:44:27|2058-10-22 10:05:58.452026865|2029-10-10
+127|2458|1312270193|-7703540456272994304|-30172.91|-3932244.53|4584335441081.783203|true|geology|oscar falkner|2028-05-09 02:38:49|2061-07-18 05:22:43.486063370|1992-05-17
+|-3896|2125479431|-7528526815026692096|-43588.12|2626041.23|108128490708.878906|false|zync studies|tom davidson|2079-01-11 16:56:51|2021-10-26 01:44:33.360652926|1991-12-08
+66|-26565|1415647436|-7497303453253402624|40063.00|-3378713.78|4711143056921.755859|false|study skills|quinn carson|2063-09-21 11:00:20|2028-01-29 05:55:55.60341061|2053-12-21
+-10|29023|-1349876582|-7404052043914526720|-49017.66|1739116.26|-1136993478948.511230|true|xylophone band|fred davidson|2013-12-01 20:22:06|2022-09-10 20:46:48.711563393|1993-01-01
+-96|13845|1482983157|-8194062064124362752|-11244.55|-159728.26|1309790215609.823242|true|debate|quinn johnson|2063-04-01 07:33:34|2013-11-29 03:31:42.159871063|2047-11-27
+-2|-5216|-423074450|-7500200359698907136|-30084.10|-3880377.83|1722452513891.284180|true|chemistry|irene ellison|2057-08-26 16:24:51|2072-08-27 10:55:23.742757053|2034-06-07
+94|26031|374283948|-8469607298426437632|37962.54|-2096075.50|-3681863458112.886230|false|forestry|wendy laertes|2030-12-24 16:17:15|2039-10-14 15:12:53.586673558|2089-05-30
+92|8188|-789126455|8716401555586727936|-20231.51|-869679.31|-4521385333389.105469|false|kindergarten|irene steinbeck|2045-03-29 19:31:48|2065-03-17 00:26:26.825546265|2011-05-24
+44|-18581|-1749415887|-8509547439040757760|-22655.82|-1388039.31|-1825647192295.107910|true|religion|ulysses steinbeck|2032-10-19 14:19:15|2023-10-26 21:55:37.177075833|2053-07-04
+-92|-10015|1184001017|1142|-33050.51|4481379.80|-1053461687358.634277|true|biology|ulysses ovid|2014-03-06 00:20:33|2045-04-09 08:35:52.499479468|2087-01-20
+-54|1436|152654715|-7928062266382778368|41161.73|-1889940.58|-1575161140707.663574|false|biology|xavier steinbeck|2060-01-09 17:08:08|2026-04-18 07:58:06.16296875|2058-08-10
+-13|4230|1130840708|2463|33676.84|3194960.57|2435882345912.432617||biology|zach garcia|2057-05-30 09:45:00|2033-04-10 09:34:42.495803643|2020-12-01
+25|2295|-1202975006|1556|-20807.16|-2353771.77|-1904146904419.441406|false|joggying|alice ichabod|2026-12-30 03:35:56|2022-03-21 10:42:40.306953000|2044-07-03
+-69|1519|-369183838|-7329807949048193024|-32663.02||3844582365590.625000|true|values clariffication|wendy xylophone|2080-05-28 15:57:10|2047-12-16 10:29:14.386661377|1981-07-08
+-69|14041|-295751373|2805|-13866.52|4333070.89|-274202051963.641602|true|education|ulysses carson||2023-02-08 20:21:21.62389399|1972-09-23
+-46|-21695|-1111814111||-39900.23|3075898.85|-1328765175408.754883|false|values clariffication|irene van buren|2068-06-24 09:20:11|2078-04-06 07:36:13.609048906|2084-03-02
+33|24776|-1341627565|1086|-25329.13|-4124697.39|568946558863.494141|false|linguistics|fred van buren|2016-11-25 15:42:53|2041-09-06 16:44:15.753021799|1997-04-05
+88|-24305|-2017279089|584|-14782.30|-4427868.96|-4580945829519.827148|false|xylophone band|alice ellison|2025-04-02 00:57:40|2065-12-15 09:01:25.455146608|1992-09-23
+85|18508|-933324607|2986|-35539.03|949235.35|2150838152903.382812|false|values clariffication|rachel young|2077-06-06 13:55:44|2060-03-15 13:08:33.223456909|2062-03-17
+-57|21469|-1305139473|8936639033158410240|-14758.73|742696.42|-3129953345578.364258|true|joggying|mike underhill|2050-05-16 08:16:39|2022-06-09 23:24:09.470631530|
+-31|-4209|-1261099087|663|-32989.53|-2638594.96|-379965508311.385742|false|values clariffication|sarah van buren|2018-10-27 03:59:19|2052-04-15 01:32:00.994348144|
+25|-15493|-2076460151|917|14582.04|206182.79||false|history|bob ichabod|2062-06-18 12:11:36|2072-10-22 14:39:08.102756440|2035-09-15
+-127|-21432|1392980712|-8896045754034978816|33746.10|1369438.32|-2213339183595.266602|false|linguistics|victor carson|2063-07-11 11:19:36|2059-02-03 05:23:38.742424491|2081-11-25
+-18|4446|-1627366321|3770|19646.12|-1061360.32|3763670834676.986328|true|history|david allen|2060-06-09 15:38:25|2076-04-15 14:40:26.65920865|2105-03-29
+-49|31651|1742536084|181|20539.33|1900029.03|-973711957609.921875|false|industrial engineering|sarah robinson|2058-09-14 09:13:51|2049-11-22 16:20:46.456017322|
+-121|-30818|461680901|3879|25996.87|1045218.96|-3744622671193.091797|false|opthamology|quinn zipper|2029-12-11 08:15:14|2035-08-10 11:56:46.326756601|2098-04-10
+-104|6115|1271280812|8268875586442256384|-46035.48|3944258.09|444985822411.005859|false||mike hernandez|2053-08-11 14:07:25|1970-01-01 00:00:00.326170366|2005-06-25
+57|24436|1660278264|8230371298967609344|-14743.67|-864865.12|-3684060933780.451172|true||priscilla ellison|2050-01-30 06:06:03|2041-02-12 06:14:03.898656769|2001-12-14
+54|-3179|-1210907929|-7784419454650843136|31849.45|-3799672.87|1576487530284.466797|false|american history|yuri ellison|2068-10-11 21:35:15|2025-11-07 17:23:48.618067004|2043-08-30
+10|21509||7259955893466931200|-47613.45|4839854.05|2427453521622.489258|true|mathematics|quinn garcia|2016-10-15 22:05:46|2068-02-07 16:25:49.828674|1996-07-12
+-45|-16301|-359943425|7310869618402910208|9811.11|2506299.82|4772172041585.029297|true|nap time|victor miller|2056-04-13 20:03:18|2044-08-02 00:28:03.490555974|
+-105|-25596|1372982791|8665969966920990720|33573.48|-1657931.01|1984118625141.562500|false|zync studies|ulysses robinson|2040-06-03 12:52:31|2019-04-20 12:32:05.699693227|1980-07-11
+-116|10216|-1826997220|7997694023324975104|-47516.30|-2963202.09|100917821438.599609|false|forestry|luke polk|2052-05-02 04:53:51|2021-12-10 05:20:23.715804563|2041-05-16
+8|5661|-797889292|-7194281951646187520|-22733.41|1685818.62|-4299495592291.648926|true|yard duty|oscar miller|2081-01-22 19:21:59|2014-08-23 04:40:02.148690660|2072-09-22
+-81|11299|2045579147||-6003.77|2350857.41|304950002790.534180|false|biology|jessica van buren|2063-09-17 16:53:04|2044-12-20 20:38:31.329673698|2064-04-08
+-12|-9566|-507015439|-7413317118463164416|31895.75|-433518.57|365292387716.635742|false||holly underhill|2021-09-06 18:29:08|2021-11-28 08:11:22.448374557|1982-10-11
+118|-12506|1166237779|8910706980937261056|22112.37|2939657.23||true|nap time|irene steinbeck||2044-06-03 15:58:30.165260645|2064-10-28
+91|-32364|-457341338|-8131997716860526592|-45833.01|-2185113.98|-3337207914447.510742|false|biology|nick robinson|2050-06-22 23:36:26|2051-12-11 16:48:50.166285024|2082-01-03
+120|22111|-449333854|3036|-47682.26|4533889.39|-2373794013216.148438|true|zync studies|katie white|2066-10-24 10:18:00|2079-07-09 12:23:14.869010625|1979-04-16
+62|-24422|-1939362279|779|41770.63|-3030328.24|-3036341908371.618164|false||sarah carson|2038-12-25 12:38:22|2016-03-10 19:56:02.617048560|2043-05-14
+105|-7871|1677444379|462|-25466.45|-2333073.80|1573764982827.370117|false|topology|david laertes|2065-01-23 17:35:30|2030-05-28 19:57:50.523892948|1978-12-02
+92|22013|1934970004|1530|-31612.20|386374.79|3919658011295.154297|true|wind surfing|rachel steinbeck|2043-06-01 15:48:11|2072-02-23 16:08:41.7277206|2066-10-17
+-7|31706|480849725|2420|-34144.44|1948672.56|-2495375067634.100098|false|industrial engineering|priscilla ovid|2019-04-23 17:17:50|2041-01-13 14:08:37.798985197|2086-01-28
+-5|31374|-1989378509|1620|-42264.62||-3891991449183.755859|true|history||2025-11-06 15:53:38|2049-04-01 09:50:27.173079026|2094-09-06
+0||-619311578|-7603467428164009984|41313.92|-2867281.50|2333079974592.848633|true|forestry|||2047-11-03 06:58:16.269601873|1996-03-21
+87|13048|-1058356124|8900180888218329088|46143.53||-1070361491662.197266|true|topology|jessica white|2047-07-05 22:45:05||2048-12-11
+-88|-21233|467753905|8145745969573666816|25312.60|1049802.86|-2723673201054.843750|true|yard duty|gabriella xylophone|2070-11-24 04:08:34|2026-09-28 18:14:59.105449930|2006-10-08
+91||436093771|3866|-20507.42|-4036924.13|3015854329390.493164|false|chemistry|katie davidson|2062-02-01 01:28:52|2023-12-07 18:06:55.295471618|2009-07-21
+-40|-26367|1638471881|2942|-20839.12|2334055.71|-1009017992118.472656|true|values clariffication|quinn xylophone|2024-10-31 09:37:15|2035-05-26 20:30:15.166412370|2096-08-22
+|-15341|1284956108|2714|-24368.52|2762099.65|996794591008.445312|true||ulysses johnson|2036-05-10 07:43:41|2014-11-17 17:41:54.569317179|2004-02-18
+-81|-28536|964810954|-8664806103426252800|-3895.82|1239798.71|1460043982310.139648|false|chemistry|tom ellison|2073-06-09 02:04:54|2054-03-16 19:45:38.587152452|2029-08-04
+-102|4781|-134686276|-7245872320493322240|-39160.59|1738195.03||false|zync studies|oscar nixon|2043-12-25 19:27:04|2036-11-22 08:14:18.398127889|2044-08-20
+|18190|470575409|3677|15296.19|-3367111.19|2375368805782.116211||biology|jessica falkner|2020-03-17 05:08:10|2017-07-24 14:20:17.704096216|1989-09-07
+-120|29775|-1544877665|3322|-30539.75||-2169994076424.543945|false|mathematics|katie quirinius|2068-07-30 00:23:54|2076-10-31 16:35:34.618086233|2071-05-31
+69|-14561|-337829479||-17108.31|4350734.10|-321356327348.425781|true|nap time|yuri garcia|2022-05-20 06:16:32|2074-05-07 23:37:52.880290719|
+-53|20814|-1621721177|2843|45481.88|659779.85|-144425647229.944336|true|yard duty|nick thompson|2070-10-03 22:05:16||2011-04-23
+66|24975|-1727003541|3770|-48598.17|2248549.81|2442081049155.789062|false|quiet hour||2057-08-21 06:32:31|2052-10-01 01:14:42.357763204|2082-05-31
+80|19454|881695885||-24555.54|-2658541.95|-3999216119225.792480|true|mathematics|zach laertes|2058-09-06 18:18:39|2045-03-19 01:11:10.357277543|2088-11-30
+127|-20559|76299337|-8347088645602050048|33581.96|3373684.98|-2291600612129.158691|true|values clariffication|luke zipper|2045-08-02 04:19:46||2003-04-23
+35|18372|375106978|-7558524160894427136|17438.11|3055890.03|-2851108972106.182129|false|mathematics|luke ichabod|2014-10-12 03:59:44|2064-05-10 04:50:52.176951865|2045-04-26
+12|-2546|-932525608|1128|10538.74|-213150.40|3852649211677.494141|true|forestry|zach quirinius|2062-05-02 08:03:33|2054-07-23 19:16:01.54567994|2011-05-24
+-99|6734|-752222556|-8030058711611629568|-9999.43|2222603.87|-1177370723477.686523|true|philosophy|tom allen|2064-11-30 12:33:07|2038-05-12 08:15:21.550815629|2062-05-10
+-84|-31828|1640192895|-7085247548404178944|11396.60|765437.70|4376234981030.542969|false||fred laertes|2031-05-12 17:02:46|2020-06-02 13:39:29.887480414|2075-10-23
+98|-27358|-1908696083|-7524170566881329152|33328.47|4906241.93|-4200209955921.394531|false|industrial engineering||2013-10-21 14:51:57|2065-02-15 11:01:46.758468368|2031-01-03
+29|39|-1969235238|-7612466483992051712|-21834.37|-753997.20|4139418130730.519531|false|religion|fred thompson|2013-07-08 01:25:24|2041-08-16 15:21:08.955635598|1978-03-09
+97|-5435|1925283040|3858|-44322.83|-4029585.87|-160952372089.369141|true|values clariffication|yuri white|2056-04-24 04:26:00|2074-01-27 09:05:58.787171815|2105-07-12
+73|1864|-374337252|-7185369278665605120|-5602.25|3122723.45|3298229895790.285156|false|education|bob davidson|2057-05-25 11:17:25|2025-10-22 01:38:03.980293239|1972-11-27
+70|29922|-43858652|8294315622451740672|-29448.03|-1778079.57|-1766076763805.628418|false|zync studies|mike king|2065-02-07 13:17:50|2061-06-29 03:21:18.864990826|2007-01-29
+-38|18482|1027147837|3749|-11458.85|1833652.80|-678647292518.519531|true|education|ethan zipper|2076-07-22 09:12:19|2070-12-01 19:35:01.281100277|1976-05-24
+66|1100|-1116100266|2475|14297.80|-2892537.32|-3946454621441.901367|false|kindergarten|priscilla ovid|2026-12-27 17:59:09|2031-08-26 16:20:21.731841751|2056-12-20
+-75|-244|-828724467|7017956982081404928|13959.52|3637484.91|-967659211294.602539|false|kindergarten|tom king|2024-05-05 02:41:05|2017-05-13 18:14:40.518511433|2089-08-08
+0|28774|-214166042|8996824426131390464|-8379.85|3716914.13|4222254163433.164062|true|religion|jessica falkner||2062-05-22 16:30:19.873937390|2037-10-21
+98|-23943|704038411|-9008631121684832256|11467.22|-26109.85|1453933622072.760742|true|quiet hour|quinn van buren|2031-01-15 10:24:44|2080-02-20 18:34:29.393914150|1971-04-30
+-44|-22919|1059212450|-8664374244449050624|-43783.06||-2744206179838.357910|false|topology|tom zipper|2078-04-16 22:09:57|2015-02-04 03:02:42.444900448|1990-10-04
+-96|28828|1194243726|-8022573309127000064|-36993.05|-3350574.73|-3446729979624.407715|false|education|luke miller|2049-03-01 14:30:12|2033-07-10 21:42:22.862245209|1993-11-26
+43|-23250|826519029|7260908278294560768|28434.22|2489106.74|-827966036176.519531|false|joggying|holly hernandez|2046-10-24 15:08:00|2018-04-28 21:52:16.144224143|1983-06-29
+-109|11159|-1146649990|-7804116532814151680|-21512.36|-2988451.20|-767455514749.215820||xylophone band|ethan ichabod|2028-08-10 15:01:03|2071-11-24 13:43:25.874441396|2018-11-26
+-31|27028|1102561039|-9032650742739836928|45143.72|3309952.46|2242882207203.713867|false|biology|priscilla carson|2040-05-20 00:52:26|2039-07-07 05:12:55.557873894|2050-08-13
+-31|8018|-1469463456|501|-9472.19|2090864.64||false|values clariffication|ethan zipper|2059-03-13 14:20:43|2050-12-12 12:51:46.639473654|1972-01-12
+35|24178|198539698|-8023708819947323392|22201.65|411700.32|110901550184.152344|true|study skills|priscilla allen|2042-03-09 10:00:23|2039-05-17 07:47:17.35897251|2015-02-04
+72|-10874|917891418|7099005292698550272|-41703.56|-239441.80||true|history|quinn carson|2022-05-02 06:51:14|2074-02-24 23:11:20.544869728|2040-07-11
+-99|-9619|-1364322216|-8555709701170552832|-37994.57|4764958.77|-2213290379214.534180|false|opthamology|holly hernandez|2059-01-01 11:35:05|2054-07-17 23:18:57.923510131|2070-05-01
+31|-22423|-1884780525|1021|1397.74|1201328.01|-4818799249138.656250|false|religion|oscar xylophone|2018-08-20 16:36:19|2078-04-14 04:08:29.133505551|1995-02-09
+74|13161|170870820|837|29320.25|-2472720.99|4744229264480.814453|false|chemistry|alice ichabod|2030-01-04 19:06:17||2042-10-11
+-103|-33|25400543|-8659692318743314432|36914.91|-1276086.36|-3921251458029.359863|false|yard duty|quinn robinson|2031-12-09 16:11:01|2078-04-26 16:53:21.302790369|2099-01-27
+-105|-21292|1359437295|918|-3658.01|1712092.37|1174966655510.374023|true|quiet hour|rachel white|2075-06-01 13:42:17|2061-08-03 05:14:34.975643283|2074-11-30
+|-705|-817383093|3231|17633.05|747243.64||false|history|quinn davidson|2077-07-22 03:16:53|2034-06-11 20:57:54.636482005|2026-01-01
+|24299|1731764471|8811693967537774592|-31379.03|-2858399.87|-1035856578130.811523|false|undecided|katie quirinius|2059-04-11 00:04:58|2023-04-09 00:06:25.50734754|2049-12-22
+20|32584|1336194583|425|-48765.66|1611818.56|3031299564927.358398|false|joggying|tom underhill|2051-02-13 22:52:50|2071-09-27 23:40:33.597399987|2090-03-28
+127|-6806|1605596441|2223|-15150.22|-2788232.30|4969239348148.341797|true|kindergarten|calvin ovid||2049-10-17 14:26:44.325906064|2053-07-28
+|-16477|989475408|2025|19325.70|3820886.26|3959945144125.960938|false|topology|jessica brown|2076-09-09 00:56:05|2043-04-12 08:47:11.743040247|2054-07-01
+48|6080|748358417|1366|10522.55|1308549.86|3204335645975.468750|true|mathematics|ulysses brown|2068-07-20 07:06:56|2065-02-03 07:51:53.142464311|
+-23|-24763|1363459426|3183|-1941.42|2866084.36|4090868873717.638672|true|religion|bob laertes|2054-10-11 05:20:09|2017-05-18 01:39:50.669692033|2083-08-13
+|26952|477584560|8639254009546055680|22309.89|-773630.55|-2616912485449.312012|false|topology|fred steinbeck|2055-06-12 13:09:32||2041-04-05
+6|-13607|1404346934|1701|9755.15|3694127.29|2289362105608.816406|true|mathematics|irene quirinius|2029-12-28 04:20:07|2021-03-23 01:27:22.684453883|2062-05-26
+69|12779|117620760|2144|35154.87|-74893.94|-60821955725.600586|false|opthamology|katie polk|2023-07-29 05:56:18|2077-04-30 23:25:52.819734472|1973-06-20
+71|19874|-954480325|812|-29151.55|-583085.51|-2116792328856.504883|false|topology|mike thompson|2022-07-04 08:33:09|2054-05-06 18:42:54.372981849|2101-01-25
+50|-4676|-1198036877|3029|21369.18|-1981569.84|-1564171250410.737305|true|mathematics||2024-06-20 15:56:00|2040-03-30 06:03:01.174329441|2068-07-02
+-72|30184|177294487|3021|5165.49|-1596340.34|1223117998739.837891|false|education|tom quirinius|2042-07-11 09:42:25|2036-11-21 13:02:42.103331236|
+29|-7738|1626884085|-7831595638727565312|-45050.94|-2700638.86|3865143383275.117188|false|forestry|nick robinson|2063-12-16 11:44:53|2062-05-28 04:19:35.174802501|2078-01-13
+3|21529|1570238232|7291432593139507200|32858.59|-4388204.21|-210573866194.288086|false|study skills|david polk|2024-02-02 04:09:29|2038-08-21 04:22:50.431607050|2040-05-27
+-17|28899|-758231588|3144|36652.92||-1900552843483.183105|false|study skills|calvin steinbeck|2049-10-03 07:06:12|2020-03-06 22:26:13.607554719|2035-11-03
+|-16282|1061638369|1777|-4384.35|-1244086.91|-2637519110440.145020|false|debate|mike polk|2063-04-01 14:18:27|2071-09-05 06:27:11.573643641|2019-10-13
+-117|5805|-2117280385|2786|30428.31|-1847649.14|3514157597185.949219|true|mathematics|zach xylophone|2044-12-26 14:06:27|2065-02-21 06:38:34.747815475|2076-09-01
+53|-24478|759899363|-7055760785575665664|-38851.48|-164936.61||true|opthamology|wendy van buren|2036-11-09 12:45:04|2015-03-19 16:03:44.687802082|1974-07-12
+-14|15342|-742707249|997|-30055.88|3904057.55|2666224881009.426758|true|quiet hour|yuri underhill|2022-04-18 18:27:22|2058-06-03 07:14:44.710973048|2029-09-09
+17|18558|1544482684|-7455898404374921216|21695.62|1828986.91|1486379335536.440430|true|xylophone band|priscilla quirinius|2019-04-18 10:14:07|2065-10-13 03:03:50.175251946|2035-03-25
+63|2756|-1651993300|8156782979767238656|47445.89||-1961119985887.940430|false|mathematics|yuri steinbeck|2075-10-02 19:48:35|2080-04-29 16:47:10.830876109|2000-06-11
+-84|-22960|-2042647152|-7511202710200885248|-24386.13|-4029840.95|-2510830722895.000977|true|geology|holly steinbeck|2029-02-11 00:58:36|2047-02-27 12:50:23.888314937|2094-07-31
+-80|18972|658850444|8254763178969915392|21450.96|3702206.03|-3674276371457.132812|false|joggying|ulysses ovid|2039-06-13 12:28:06|2016-06-11 08:22:07.949640945|2072-04-04
+112|31432|-1030565036|7921639119138070528||2660603.95|-1690952529497.664551|true|xylophone band|xavier king|2017-06-10 07:41:38|2021-08-11 18:06:21.956845249|1985-03-21
+-60|17086|-592568201|601|5540.34|-2955362.85|4784206322848.660156|true|wind surfing|fred steinbeck|2060-06-12 10:32:48|2060-05-18 02:32:45.427275127|2005-04-15
+-37|-15538|1493152791|7581052107944361984|20231.52|-3867808.16|-1930084301499.235352|false|values clariffication|ethan allen|2025-02-09 11:05:22|2070-09-16 10:31:49.168754255|2078-10-09
+83|17769|-707228984|1477||1933098.47|1243678323415.799805|true|topology|alice ovid|2041-12-22 11:47:06|2029-11-10 21:46:09.465549350|2034-01-19
+84|4332|994798486|-7273590251991162880|-530.49|-372447.19|966744556747.407227|false|xylophone band|ulysses ellison|2057-07-25 02:50:33|2055-03-26 10:14:50.431828635|
+-4|-23271|-359194591|1094|-9368.19|4069501.92|-4572923026255.822266|true|study skills|zach young|2051-09-13 17:10:12|2023-04-17 06:52:55.145926793|2021-04-06
+-55|-20876|-1270523286|378|-44353.44|2396737.53|4229227938355.820312|false|joggying|victor nixon|2063-08-30 21:44:45|2029-06-14 05:52:34.105866037|2094-03-20
+|-20218|791096295|-7665186441284968448|-20412.10|2217637.27|4562991879297.548828|true|topology|david garcia|2054-05-10 11:07:00|2041-04-17 03:22:13.766692104|2069-11-17
+-39||48554395|3554|-24292.94|4604419.17||true|wind surfing|gabriella allen|2023-09-06 06:55:15|2072-07-16 06:46:24.328637434|2029-10-13
+-41|21665|1990792684|7231399302953377792|-38445.21|3349715.89||false|quiet hour|nick carson|2079-07-05 20:05:00|2034-12-18 22:02:47.270122839|2070-09-19
+-50|27071|-1274158260|8220104397160169472|30089.83|2297877.70|4300108680631.070312|false|yard duty|oscar ichabod|2046-09-17 01:47:55|2032-08-28 15:40:30.150960444|2034-11-01
+-82|-5259|-1901806083|1606|8656.31|1686951.72|-2805163217886.743652|false|opthamology|luke xylophone|2047-07-10 16:16:02|2056-07-18 04:15:36.890442784|2024-11-03
+-116|-31793|-870900240|3812|-45329.51|3797720.43|-4712635243486.857422|false|american history||2035-09-17 10:15:34|2029-07-13 09:16:29.231146829|2065-07-22
+-15|-15119|-1648991909|-9051477157204770816|-155.56|233740.40|-4028341779707.886719|false|xylophone band|holly white|2062-07-18 02:55:15|2018-03-05 18:18:02.658727871|2055-02-14
+125|26292|1336951982|965|-3145.16|-2870316.24|1200365494539.638672|true|opthamology|oscar hernandez|2021-08-15 21:22:48|2077-01-25 12:45:13.576960421|2087-07-20
+-124|-25624|-1424027104|8290014929764040704|33446.06|-2015309.65|4621409183423.708984|true|mathematics|rachel laertes|2030-07-13 15:41:34|2024-07-19 05:43:44.761137371|2068-11-04
+74|-15819|1393506704|-7333278178640953344|-41521.39|-1382657.57|1818449023106.509766|false|values clariffication|david white|2013-11-13 01:21:21|2051-03-30 03:43:58.63225731|1977-02-08
+-46|-21358|1595326878|-7494411162675691520|47402.29|1481302.07|-737916756281.720215|true|biology||2014-11-01 08:48:04|2080-07-22 02:50:15.245713301|2067-08-04
+78|-3061|-1688105985|-7845896959112658944|20424.90|-2200853.82|-4030721564466.972656|true|forestry|sarah garcia|2048-06-07 09:27:06|2056-11-07 17:02:53.875147020|2050-01-23
+121|-27998|1893632113|1506|46216.74|-4246065.27|941594019679.499023||quiet hour|priscilla quirinius|2034-04-02 17:55:45|2070-12-01 11:40:21.22381104|2063-03-18
+79|-11015|882331889|1307|15201.15|-4985467.57|2728236854644.627930|false|xylophone band|katie zipper|2068-07-16 18:28:04|2027-01-31 13:47:37.483422716|2073-06-23
+-2|-14597|922553769|8697823501349609472|34404.90|1448595.84|-30527549927.832031|true|nap time|jessica ichabod|2041-11-09 07:07:21|2049-09-22 14:17:28.756379035|2104-09-14
+-83|-3373|-1231821948|7232273749940838400|6870.08|231266.91|-3756701107128.712891|false|biology|luke young|2077-10-05 02:38:04|2028-11-04 08:15:44.533514578|1987-12-07
+-93|16217|914583645|-7777884099756122112|-28620.37|-4465513.64|915225716928.311523|true|undecided|nick hernandez|2076-08-16 00:10:11|2032-07-12 08:42:03.425331560|1991-03-30
+-25|12471|860708524|244|-9606.38|-4397701.63|1992695582220.882812|false|topology|yuri steinbeck|2028-05-04 05:13:05|2073-02-25 13:42:06.958715276|2023-02-03
+-20|4104|-837506172|7130159794259353600|10637.11|1082684.90|-1061846410311.524414|false|biology||2069-07-05 15:43:40|2015-12-06 23:34:18.460119776|2105-03-02
+119|5639|-425103007|2264|-13547.98|-4258720.44|-2078162482098.839844|true|study skills|tom brown|2030-05-29 20:15:30|2028-06-29 09:07:43.498893365|1979-07-10
+-34|10646|-1081766449|2524|37570.72|-2194969.24|2620228350592.164062|false|forestry|holly ichabod|2078-09-09 01:05:14|2055-06-28 04:30:12.285923687|2033-11-09
+34|26796|1942004879|-7058986555327307776|-31641.81|-1131793.64|-3407576334614.113281|true|philosophy|katie xylophone|2059-08-22 02:49:28|2066-05-01 16:28:29.327975427|1978-10-29
+45|-27705|-1817564067|8287522765741301760|-43897.38||3907121274240.595703|true|wind surfing|rachel davidson|2047-06-24 16:49:48|2056-07-02 13:50:19.90463468|1983-11-05
+45|32348|713031549|238|-38634.07|-1450667.10|-409310416914.650391||education|oscar johnson|2054-09-19 05:27:23|2029-10-02 14:05:57.413615201|2032-07-08
+46|27675||8905330479248064512|16198.16|-2632400.27|3886641749701.169922|true|joggying|quinn johnson|2019-02-05 14:05:06|2070-03-27 04:03:58.235564696|
+97|-9378|-1302592941|2565|-10209.72|2747639.04|4786387366438.126953|false|joggying|quinn steinbeck|2071-10-12 12:46:09|2025-04-20 22:17:35.854855645|1979-12-26
+116|-12647|-1439293109|-7464270453557993472|15349.77|1434424.80|-135137939502.065430|false|geology|priscilla nixon|2033-12-27 21:48:03||1991-03-04
+92|-11247|1386071996|3059|11401.50|-1896215.16|2347644267651.176758|true|study skills|bob zipper|2021-06-29 12:07:50|2035-09-11 08:21:19.174222330|2044-05-14
+89|32734|1190554937|8806507556248731648|38206.52|1976527.21|640421772016.566406|true|american history|bob ovid|2033-02-11 01:46:00|2053-06-28 21:09:19.628354599|2011-01-20
+26|16592|1645067708|2325||-2622604.31|2980127550702.285156|true|joggying|luke ichabod|2014-01-04 06:06:19|2071-10-17 06:17:30.135307685|2060-03-21
+11|29507|-1709246310|279|43863.50|1759736.14|-567393323943.683594|false|zync studies|yuri polk|2015-11-21 13:48:31|2073-07-25 22:12:29.641294618|2031-06-22
+124|8811|710856472|-6919476845891313664|45495.65|-1060105.47|-1430527601193.496582||biology|bob allen|2073-08-12 19:38:25|2068-08-15 23:13:27.942081256|2060-07-10
+21|-31130|832465439||19127.41|3475056.36|1333770358662.747070|true|values clariffication||2035-09-22 06:31:52|2025-04-28 10:40:21.453847657|2028-12-29
+-87|-17082|1695098246|-7949445503604604928|-16712.01|2247741.82|2302730983386.708984|false||holly underhill|2018-11-30 10:49:53|2045-02-23 00:16:16.133285975|2038-12-04
+-21|30367|-1348149160|296|38647.80|-4078029.68|-3561424322605.582031|false|zync studies|oscar hernandez|2067-04-23 03:33:33|2074-06-26 17:31:32.359037592|2029-03-23
+73|-6046|-1801684055|2776|-11660.33|-92644.47|2813453631092.279297|false|history|calvin steinbeck|2035-02-28 14:50:25|2044-08-20 09:47:08.372477879|1990-07-26
+-61|-11571|-625788713|-7541860097718902784|-39240.88|-2131339.22|1280986883304.074219|true|yard duty|katie robinson|2077-04-16 13:56:13||2077-07-22
+127|13877|-1444011944||-36685.74|1068545.94|-3123990427797.806641||xylophone band|jessica miller|2075-04-07 22:10:16|2026-04-16 15:24:05.757112195|2058-08-30
+27|-7204|177837042|1290||-4706307.91|1894298448371.713867|true|joggying|katie ovid|2078-11-05 16:33:02||2047-08-01
+75|31390|-655118881|2463|13931.63|2353987.25|342502558193.693359|false|debate|bob laertes|2029-03-17 16:57:54|2025-11-21 16:07:39.717499006|2051-09-09
+-108|4475|-1798573685|-8604758220106014720|-35702.79|-3538650.87|-2894179103280.372070|false|geology|ulysses white|2055-10-08 11:07:12||2083-01-26
+93|27830|-1743938290|7741854854673367040|312.82|-2185403.32|-1734036711805.160156|false|values clariffication|holly quirinius|2054-04-04 15:20:53|2069-12-15 04:38:47.560052946|2001-05-04
+-88|-3286|-1762037754|2551|29018.36|4267392.41|-779756981097.534668|true|study skills|ulysses van buren|2065-03-07 08:23:35|2032-11-15 17:28:07.414658349|2098-04-02
+22|-13008|-838656526|-7055619148037554176|-44259.43|-4710319.66|3995534775984.523438|true|wind surfing|holly allen|2031-05-11 03:04:40|2062-06-20 09:51:32.16783993|2002-09-12
+120|-13393|-1228063838|7779735136559579136||-1650324.63|-2119718598874.378418|false|kindergarten||2075-08-13 15:11:52|2016-03-18 12:51:06.253191429|2082-02-12
+105|||80|34755.38|3808187.81|-1326205474195.525391|true|yard duty|irene garcia|2059-04-26 10:19:24||2083-01-17
+|-21156|-16094879|-8430283518005846016|-48730.46|220370.23|3855982173238.068359|true|nap time|quinn king|2079-04-23 14:15:58|2037-09-06 10:27:52.111597670|2021-06-05
+-101|4447|-2057666812|-8017791189288869888|29088.20|2208437.63|2276256855650.693359|true|linguistics|fred thompson|2019-11-10 00:52:12|2048-05-17 18:03:07.315321116|2085-08-26
+22|11617|-291577538|-7109790267244814336|-1758.84|2108616.98|-3091881671406.149414|true|yard duty|irene white|2019-05-05 18:18:26|2043-12-01 03:20:17.297879695|2034-09-15
+44|-19926|1121512594|9136548192574529536|7809.10|-3179961.55|-2697911245110.624512|false|linguistics|xavier king|2025-06-19 21:15:27|2026-11-23 19:22:12.931539836|2028-07-04
+25|-7932|-88576126|3866|-40180.34|-3442455.88|-673567578957.623535|false|history|ethan allen|2060-03-25 23:31:50|2073-03-15 11:17:08.52042007|1997-03-03
+46|-9148|-1125605439|2587|-14408.82|2954854.93|-3234402189604.033203|true||ethan zipper|2062-08-28 19:22:21|2052-12-12 17:11:46.580071441|1986-09-27
+86|-6306|-2086352100|3199|-9645.35|2612095.15|-4830215356785.387695|true|biology|holly king|2079-09-19 10:54:47|2047-06-03 07:24:45.53853551|2048-06-24
+-46|7821|-1117358187|-7052619594823221248|-45636.56|-1371279.81|607728743085.544922|false|xylophone band|ulysses falkner|2072-05-17 01:14:52|2073-12-22 21:30:25.133271709|2097-12-08
+11|20243|-1953605752|7500716020874674176|47152.07|2436880.55|4872992409355.773438|false|xylophone band|oscar xylophone|2077-11-13 07:21:25|2053-06-18 19:20:52.189371821|2037-08-07
+-75|14493|-1754347372|375|3920.39|-3542726.69|4037902414412.287109|true|chemistry|nick allen|2076-10-19 14:22:33|2067-01-02 19:16:01.197865810|2039-06-16
+-103|-9734|1106995930|-6962271229404348416|12236.89|3788033.07|-2209324511157.362305|true|american history|sarah ovid|2070-10-27 10:44:34|2016-08-04 23:50:29.753019864|1993-08-01
+-48|4715|-1721763321|-8016589197379289088|-30530.24|-1805122.07|-2176554594563.242676|true|american history|david davidson|2061-02-18 16:14:05|2023-11-29 03:29:35.375798915|2019-08-18
+-44|-9845|825977391|2680|6546.23|1003889.62|1794109717716.807617|false|biology|irene young|2023-12-29 09:25:18|2062-09-02 19:34:08.201742643|2042-01-05
+-126|-4808|1796013407|9086905513121890304|-30837.31|-2784508.91|-940462286027.212891|true|forestry|wendy underhill|2046-10-23 14:11:02|2046-06-08 03:07:27.448513337|2072-11-10
+33|-19020|-1527024213|9043089884440068096|-6538.53|2109588.86|-4192509371312.880371|true|american history|ethan laertes|2075-06-03 19:46:22|2016-07-20 02:06:17.231768834|2068-08-25
+41|-3517|-483740394|7039820685967343616|43693.06|-264459.25|-4706316500277.694336|true|debate|ethan polk|2042-08-17 16:30:23|2057-07-20 03:42:57.471171665|2089-01-13
+-78|-2993|-18917438|-9088239683374350336|21322.28||-3152751111265.139648||wind surfing|sarah nixon|2069-12-22 02:28:56|2020-01-14 00:49:35.464276768|2080-09-25
+-33|-17166|-23865350|7461153404961128448|-43841.82|1786124.64|-1312361707205.056641|true|mathematics|irene falkner|2043-05-29 19:58:06||1999-11-24 +73|9017||-7011425384222244864|7463.53|-912611.18||false|industrial engineering|calvin steinbeck|2076-01-13 05:42:08|2031-03-22 04:03:05.11520590|1970-12-22 +110|-23248|-2052386812|-7849504559236210688|18164.03|-4745727.57|-3268497309599.815918||religion|tom robinson|2035-02-18 06:36:09|2054-12-27 11:57:51.540094893|2101-02-26 +-78|-22941|-379174037|8518454006987948032|-16028.01|1101655.31||true|linguistics|yuri robinson|2079-01-29 02:22:51|2030-04-08 12:53:40.462715567|2009-07-15 +118|-13603|-896261100|-8108693586698706944|-37725.47|1643492.81||false|joggying|katie davidson|2056-03-16 00:41:42|2047-08-08 10:30:43.684357090|2041-08-09 +4|-11165|-87470856|-7835907977757245440|47766.70|-172736.58|-705268148248.308105|true||irene miller|2017-11-26 23:29:39|2049-02-04 04:05:19.789876244|2082-06-03 +95|-7531|-1431196400|3769|-11554.25|-449966.63|4127334068106.699219|true|history|ulysses johnson|2015-12-12 21:46:10|2036-02-03 09:18:11.872922402|2084-09-09 +-53|25197|-2053551539|6987889924212203520|42481.70|-2814180.11|-2222751654694.086426|false|philosophy|jessica zipper||2041-04-26 16:48:25.147389412|2035-11-10 +27|-31454|516479816|-8454143651040444416|21048.16||85228167341.461914|true|education|wendy ellison|2035-04-12 17:01:03|2038-09-21 20:57:42.397475607|2053-06-29 +98|20939|696229550|3588|-33537.03|-4795897.68|972612471920.477539|true|history|bob brown|2077-08-12 19:51:35|2074-05-26 22:09:41.804789098|2016-02-17 +58|18555|15020431|8195103847607967744|-14811.21|4118824.54|-1765284631805.848145|true|industrial engineering|ulysses zipper|2067-11-06 17:46:46|2061-01-21 01:12:17.706664569|2041-01-13 +105|-13847|1535954353|2002|-34929.28|4171958.52|4981340994597.296875|true|joggying|xavier carson|2068-07-08 07:54:35|2078-11-06 16:58:50.207226583|2014-02-14 +1|4939|2081152819|1386|47521.27|1005940.42|-271834636299.068359|true|linguistics|gabriella underhill|2027-09-30 17:03:39|2075-07-06 03:25:36.109827301|2076-03-27 +-15|-19343|1136976809|-6960947572095770624|-21777.67|-1396434.27|3880893659215.835938|false|education|katie garcia|2076-04-17 21:34:09|2029-04-22 02:08:55.846697941|2057-07-27 +0|-19427|-402441123|8282648443538710528|-2461.87|390965.06|-1946353660340.598633|false|mathematics|zach ichabod|2018-05-20 06:00:09|2060-04-07 09:23:55.211875207|2106-01-06 +81|-17502|1142481557|3648|-15271.49|3837206.12|2711396060126.780273|true|opthamology|mike falkner|2047-02-03 13:28:18|2068-11-19 13:47:25.883917641|2089-11-03 +-3|18240|1385883394|3245|49733.17||2590076423511.717773|false|chemistry|fred brown|2037-04-04 09:34:12|2056-08-29 20:37:59.589930416|2039-08-03 +-67|16236|-1759354458|169|44818.98|3676983.55|-2521939566095.312500|false|values clariffication|mike ellison|2080-05-18 03:39:05||2033-05-16 +-75|32420|368170021|1745|-31913.00|3098988.31|-1709482427198.700684|true|values clariffication||2050-09-10 15:48:50|2043-03-17 01:49:01.211358054|2040-12-06 +123|-9798|1022214896|979|-34858.06|-4611759.94|3132623148434.739258|true|religion|luke quirinius|2047-05-03 15:17:41|2014-04-23 09:46:34.314946856|1981-03-29 +56|-22949|1352649032|-7161165959057334272|-30633.03|-2172059.93|2676086958402.770508|false|debate|ethan miller||2035-03-25 07:11:42.630584685|2031-12-02 +-81|23910|-816661030|7753359568986636288|31497.00|-2540156.82|-855079143516.164062|true|mathematics|priscilla steinbeck|2043-01-20 17:52:19|2027-10-18 07:59:41.254276161|2077-10-14 
+-6|5601|1395450272|-8054581198284668928|-8542.91||-1606030317476.374512|false|biology|zach xylophone|2078-12-27 11:24:15|2045-02-28 05:02:44.915979901|2044-05-29 +107|11999|1517915751|-8357136656913686528|7849.61|644546.42|-4762355116194.392578|false|xylophone band|quinn ovid|2031-03-07 20:01:49|2066-06-23 18:44:02.59637674|2035-11-21 +-111|-7201|402173272|8244041599171862528|13246.24|1865645.47|2451208712660.139648|true|opthamology|quinn garcia|2021-01-27 17:43:56|2034-08-09 01:39:56.465266766|2062-03-01 +84|28641|1550112473|2105|-34839.48|2206374.90|2481329266919.338867|false|american history|victor carson|2072-09-14 02:39:11|2074-11-11 20:35:41.528267430|2059-11-30 +83||1991072829||-19752.73|3615299.21|4961477513340.923828||linguistics|mike ichabod|2039-05-25 07:13:14|2043-05-14 11:35:08.371631978|2070-10-02 +-42|-24890|619884480||-43634.20||238695595845.186523|false|quiet hour|katie king|2025-12-30 16:23:29|2070-11-07 19:58:58.701587107|2074-04-07 +6|1773|945911081|1201|12149.74|-4310588.56|-1511638497761.680176|false|xylophone band|xavier miller|2038-02-01 00:45:45|2080-09-05 12:52:47.148409132|2085-08-31 +-15|14021|-504529358|7401968422230032384|-37534.75|4201171.03|2444925405174.487305|true|nap time|mike allen|2050-09-18 11:04:33|2014-11-29 23:07:36.744244067|2099-03-11 +5|31688|-1078214868|3286|30636.18|1173971.62|-956227486151.846191|false|wind surfing|victor ellison|2043-08-31 03:34:52|2035-03-16 01:11:26.192346613|2098-03-16 +-45||-1196101029|-8300764106868350976|21928.17|-4647014.51|4956420840249.898438|true|zync studies|david king|2055-08-25 19:03:20|2064-10-30 07:03:06.481596802|2061-07-28 +77|-10392|-733756717|3206|25255.20|1494571.87|2019642227208.784180|true|education|fred zipper|2068-04-16 20:36:33|2036-03-15 04:50:30.944587266|2009-04-21 +-48|-232|391186487|8351163199364390912|-18364.70|3128590.69||false|joggying|nick allen|2044-10-24 10:03:44||2064-10-10 +94|10273|-1079086534|8643198489997254656|-28079.60|-1967168.29||true|xylophone band|alice polk|2073-07-18 21:48:47|2079-08-17 09:20:08.850511965|2062-04-19 +7|-15024|1164895226|7647481735646363648|-41404.65|3363381.29|2413359348945.647461|false|history||2058-03-03 02:48:59|2039-11-24 21:06:19.627853209|2055-01-10 +115|20680|304860245|-7017212700635545600|6444.21|244053.33|-1143755671374.719727|true|biology|mike miller|2056-04-10 11:28:10|2041-05-22 04:06:30.995080646|2014-06-22 +91|12251|-1605045257||-17097.62|482477.24||false||calvin ovid|2075-04-13 21:29:46|2053-05-26 09:51:59.650152309|1977-03-25 +-14|-8003|-287400633|3235||2684560.92|-4335231265805.497070|true|opthamology|gabriella thompson||2064-05-22 23:35:52.607098998|2061-12-27 +74|41|-950738312|-7902517224300036096|4116.21|-698800.30|4481184305574.998047|true|quiet hour|ulysses johnson|2070-11-24 13:03:07|2028-10-12 01:59:46.270334023|1983-09-01 +40|-9130|-454598288|-8763062627136864256|51.13|2342049.79|956991212270.068359|true|forestry|xavier young|2072-12-09 00:10:59|2073-12-27 09:38:03.702010794|2096-04-13 +-115|-15009|184879574||38210.14|4964842.06|-2681134255346.719727|true|chemistry|quinn davidson|2043-08-04 17:45:28|2045-11-25 04:36:59.505962074|2021-11-09 +-63|-7846|-181523892|470|16397.14|-125897.17|742930822037.095703|false|opthamology|sarah quirinius|2071-01-13 18:34:42|2077-08-20 23:35:14.712549137|1980-03-14 +-14|-23852|1506907734|3913|35054.27|-2107960.80|-1463820488416.942383|true|study skills|wendy quirinius|2023-01-13 14:14:04|2057-01-17 13:52:55.519677224|2064-09-04 
+-60|30199|194754262|7700734109530767360|26124.21|2765313.26|3771365519269.522461|true|religion|luke carson|2064-02-22 13:50:58|2036-05-10 22:37:32.467231653|1976-06-17 +60|8387|856986735||-19157.65|-512847.22|2926691512865.459961|false|quiet hour||2058-08-23 02:49:07|2034-07-24 01:45:54.602453003|2072-12-07 +77|2952|471464395|9209153648361848832|6685.64|-2455826.48|4693950276041.505859|false|xylophone band|calvin allen|2073-03-12 16:42:15|2042-07-08 16:12:36.775680419|2062-06-03 +123|-17772|592011541|7800332581637259264|-18090.32|259758.35|4594723649601.845703|true|quiet hour|tom garcia|2035-03-13 20:09:48|2019-07-21 13:09:55.617059503|2089-07-01 +123|-458|-1668736016|-7488345684795342848||1522581.57||false|american history|xavier allen|2024-03-10 00:44:52|2032-05-10 21:40:53.24143541|2060-12-13 +-61|6741|-1851280202|1524|36849.46||-433620929878.735352|false|nap time|fred hernandez|2062-06-20 15:38:46|2062-07-10 14:51:52.294404788|2021-10-08 +-30|-25469|479566810|2217|-38032.89|-1659376.79|-4610473792118.376953|true|joggying|oscar brown|2043-09-29 16:49:00||2088-09-17 +-79|4363|1012843193|-7939634346485858304||3845381.58|3440846254288.523438||joggying|ulysses xylophone||2020-01-27 18:28:16.376587873|1993-10-24 +-79|12954|-987995271|1937|7424.16|341212.02|-487096778003.710938|true|forestry|oscar davidson|2018-12-23 22:26:32|2019-03-25 02:15:59.24225770|1988-12-07 +-28|-8532|-1621814212|3352|-47452.44|-4528499.24|727025749396.936523|true|values clariffication||2036-03-03 21:26:26|2050-10-30 09:36:39.757209520|2092-12-08 +93|18140|63706286|8416121695917498368|-18882.25|2096358.06|2805544800868.311523|true|joggying|katie johnson|2045-02-05 07:32:27|2079-12-28 08:07:27.500711737|2033-09-11 +104|16856|414645489|-8875546987176206336|34734.07|3589029.79|-1419347451799.346680|false|industrial engineering|yuri young|2067-10-30 06:36:21|2032-02-29 15:20:26.937853261|1988-12-30 +-67|-7172|-1726585032|8532016240026279936|-49278.84|-589792.32|4737405563798.056641|false|biology|katie polk|2057-01-13 10:38:05|2015-03-25 23:38:45.889000866|2026-03-02 +-77|-2421|-1129489281|7581614118458335232|49489.68|432826.84|2527871145229.347656|false|mathematics|tom ellison|2013-04-12 09:56:41|2025-02-17 21:40:46.548907004|1972-06-16 +78|-13325|-1095938490|6923604860394528768|-49801.89|-2087305.57|3113776542345.157227|false|wind surfing|victor robinson|2064-07-08 19:36:42|2064-03-07 00:58:07.871862639|2094-12-24 +7|4952|127051381|8269730157217062912|-12009.11|4435291.94|4069009831908.552734|true|undecided||2055-01-29 22:59:56|2056-01-08 06:29:39.525903717|1973-04-25 +-35|-10420|-641062448|9000633029632499712|45756.67|-1660339.14|1993433319579.516602|true|zync studies|wendy carson|2069-08-25 17:57:16|2016-04-07 00:36:30.755764143|2058-09-21 +19|-2060|1579460630|8501910015960735744|24887.52|-833297.43|3466184527016.445312|false|debate|xavier ellison|2018-09-09 10:58:08|2023-11-28 17:44:14.558302737|2095-05-06 +-96|28272|1505168716|-6957946688477274112|35800.40|4715751.89|1708736522212.355469|false|geology||2021-11-22 12:53:31|2067-03-03 12:29:45.24728387| +-110|29461||-8870186814744420352|39663.58|-1044012.98|-4034310183267.031250|true|joggying|calvin johnson|2044-07-05 17:23:38|2034-05-08 14:54:47.672602183|2040-08-16 +64|-24682|765656980|1299|14893.14|1641660.72|-2013823095120.237305|false||victor thompson|2053-04-23 20:28:25|2042-02-05 04:17:17.132619322|2104-01-25 +1||1669519977|8637720762289659904|26267.29|-4516062.72|246182939496.897461||history|wendy ichabod|2065-09-20 07:24:40|2014-01-02 
11:21:40.566693934|2052-05-27 +15|19887|252169185|8945004737083555840|-12612.21|4149648.09|-4341775662685.307617|false|yard duty|katie laertes|2035-09-17 05:19:17|2069-08-09 05:52:56.114170070|2083-11-15 +34|-28784|198624903|2517|27453.46|1518776.32|-1471054463871.937012|false|biology|fred ichabod|2046-08-14 08:04:09|2044-10-11 08:36:22.926939719|2090-05-16 +-127|32553|-1021859098|-8302817097848307712|-11149.18|-2437950.40|-946633441440.099121|true|education|irene brown|2063-09-02 16:37:30|2019-05-13 03:05:18.641320363| +124|-8795|-1998652546|7779486624537370624||2528507.35|2879335255910.149414|false|geology|ethan thompson|2049-05-22 16:27:13|2020-11-20 22:09:26.377089870|2039-09-28 +36|20018|-206177972|1559|-40816.50|-3511360.32|-2221464435443.110352||wind surfing|david hernandez|2031-12-27 03:06:23|2059-11-15 02:37:15.170292953|2075-05-30 +-28|31721|1061043704|-8857335871148171264|-12918.52|-38641.55|4703857380405.943359|false|religion|mike underhill|2060-07-21 18:18:58|2040-07-08 22:45:42.452848655|2061-02-07 +85|28041|-914329027|-7483435388852559872|-29353.33|868819.00|252695926368.993164|false|xylophone band|zach xylophone|2074-11-30 03:33:42|2048-01-23 07:20:44.942585519|1972-12-16 +|-1801|1882932986|9123116008004288512|-43889.06||948962599216.933594|false|forestry|ulysses nixon|2050-08-03 16:31:40|2028-09-27 10:14:59.638179206|2075-10-19 +1|30824|-1620148746|-8870673219965001728|-9644.39|-2615257.04|-590567582566.130859|true|geology|nick robinson|2022-05-24 22:07:33|2057-08-15 08:00:07.417243385|2032-02-22 +23|-12709|522895626|7675009476762918912|17878.90|-3823010.71|4734519952118.283203|true|xylophone band|luke davidson|2023-07-09 03:48:05|2080-04-02 06:38:12.273277791|1978-04-07 +52|4701|-194270271|-8369487968903897088|-33434.02|1928184.34|1550969498394.075195|false|biology|luke johnson|2055-02-26 20:12:00|2078-04-28 10:06:15.628763762|2024-09-14 +-18|20059|-269702086|7696737688942567424|14979.21|2983124.17|-1261539436734.899902||kindergarten|holly ellison|2056-01-14 17:12:37|2016-10-27 21:55:00.912383100|2004-06-11 +86|-19493|-1249134513|2688|-24448.16|1690441.85|-36925882832.363281|false|yard duty|priscilla underhill|2070-04-18 02:04:38|2027-03-11 23:19:07.275313466|1970-08-16 +-7||-1031592590|-8665218198816497664|-49187.69|-1845604.18|2373751745590.091797|false|biology|yuri miller|2072-06-23 02:53:38|2017-08-30 07:57:22.485892580|2060-03-24 +-53|12836|-956668825|1827|10782.13|2658398.19|-3649350823703.407715|true|undecided|ethan van buren|2055-11-28 11:03:39|2058-07-16 01:23:50.688228488|2094-01-17 +-92|24800|-187804718||-34396.23|1256811.05|635566643755.645508||kindergarten|calvin miller|2058-12-13 04:37:48|2033-02-18 00:21:03.74380398|1999-06-14 +-94|-14229||-8051871680800120832|36978.46|8746.40|677892679777.975586|true|forestry|david nixon|2049-04-27 09:55:19|2052-01-16 04:01:56.86501132|2066-06-22 +-53|-15573|-1968097621|-6988811476286873600|-31404.41|-1270151.69|451552081463.013672|true|geology|ulysses hernandez|2042-10-27 23:41:07|2031-11-25 18:43:16.703929105|2052-04-30 +82|-22558|-1851680302|530||-2459528.49|2017693075329.844727|false|kindergarten|fred van buren|2019-05-18 13:07:16|2035-12-07 23:47:30.423041352|1990-01-04 +67|14741|1911809937|7254710367022645248|23334.72|2955683.52|1823728420987.791016|true|religion|david allen|2063-11-07 08:59:59|2019-08-19 23:55:40.683089130|1988-12-20 +90|2338|434679307|-7547432761381339136|40122.49|1638995.20|-1793950003583.972656|true||katie carson|2039-08-03 21:14:14|2013-12-17 22:58:55.203140416|2049-03-16 
+56|17942|1196151988|-7793447076762345472|43985.13|1259578.26||true|mathematics|jessica king|2033-01-12 03:08:20|2038-05-07 09:16:07.387632543|2062-08-17 +78|-19677|210003006|-8275337702906757120|33709.19|-213567.45|-385927812330.848633|true|yard duty|xavier falkner|2037-06-03 13:04:21|2040-11-28 19:30:11.841438888|2088-08-19 +|8461|26270580|3163|27085.78|2144385.55|2630943635207.962891|true|xylophone band|oscar hernandez|2053-02-05 12:41:40|2023-01-15 18:16:38.213552938|1971-05-27 +|28447|1851654062|7752740515534422016|27798.56|3416784.34|20640213342.773438|true|biology|yuri carson|2033-06-02 21:04:35|2055-01-26 20:18:56.128349625|2078-09-21 +16||-1818380492|7229607057201127424|-6638.74|2938918.87|2397048290628.553711|true|chemistry|jessica young|2070-05-09 23:42:32|2068-06-20 13:17:26.507571586|2066-10-04 +-27|10161|2133950868|187|43780.68|-1353404.78|-738475896233.375000|false|joggying||2046-01-19 02:09:30|2040-09-19 20:13:05.945264942|2036-07-29 +-99|-31352|-800975421|-7928440849566146560|-32803.70|-3003401.85|466466073068.722656|true|opthamology|rachel thompson|2020-06-14 21:26:19|2078-04-19 21:12:47.390943907|2007-09-09 +116|9572|722737062|3073|25851.02|3372247.39|1614826288195.096680|true|geology|irene brown|2062-05-21 03:11:07|2042-04-08 23:21:52.201251505|2053-04-29 +91||-598552521|2193|27740.67|-315369.76|4421973594170.865234|false||holly carson|2063-09-19 12:55:59|2075-02-09 18:58:12.645566188|2004-02-13 +-16|-5085|141492068|3781||3218604.70|879896284672.340820|false|geology|ethan polk|2052-04-12 19:44:52|2065-08-11 21:02:50.944679982|2016-12-16 +102|-9724|1891680787|-8471480409335513088|41053.01|984367.37|3440534496297.118164|false|religion||2057-10-27 22:09:07|2070-05-04 15:21:27.9482115|2053-03-19 +-92|-13979|-1744964279|8649296591032172544|-45056.99|3362549.51|3102286513091.457031|true|philosophy|oscar zipper|2043-02-10 19:53:35|2070-11-01 07:21:36.170308409|2025-05-02 +-65|-9584|1577999613|597|4985.91|1880545.24|-3349604045804.355469||industrial engineering|quinn ovid|2051-01-07 13:40:27|| +-105|2652|1916363472|7555301305375858688|21572.88|-1819115.45|3657730313242.275391|false|values clariffication|xavier white|2019-06-06 06:26:40|2028-03-02 22:15:43.240150893|2022-10-29 +61|22786|-1218871391|3253|-20047.92|-53015.66|-4751615903566.808594|true|joggying|priscilla steinbeck|2048-05-01 11:17:49|2061-12-12 04:16:26.695958830|1979-03-12 +-117|530|1182595271|1177|10603.88|3576926.09|3001390738523.106445|false|wind surfing|sarah allen|2073-10-02 15:42:08|2030-07-14 13:39:35.421270606|2097-08-19 +96|24366|-1897998366|2625|19945.77|1993941.86|2592254154334.021484|false|values clariffication||2045-03-23 23:26:22|2051-06-27 16:46:44.304739590|2005-05-06 +-114|28048|1205391962|8761174805938331648|-49476.80|-1404260.08|3630295597472.575195|true|undecided|calvin miller|2065-06-11 13:09:11|2059-08-09 10:22:26.530521665|2047-12-06 +-65|-24092|-1924909143|301|46556.96|3019231.60|-3856846735252.439453|true|wind surfing|tom young|2046-03-08 04:23:10|2038-08-24 19:32:01.250688548|2061-08-07 +-53|10430|-2074079977|-8619303037130301440|21003.68|-4519948.31|-917875940005.537109|||wendy white|2020-02-03 08:09:25|2037-07-10 17:28:08.36879102|1993-04-12 +-40|25847|85774760|-7469660864676585472|2717.41|-2544321.29|-1329085295185.627930|false|biology|mike underhill|2019-10-05 06:20:38|2039-08-01 13:21:38.927895088|2038-03-19 +52|24488|476858779|8524940073536954368|39068.98|3046959.73|742304885811.541992|true|wind surfing|wendy underhill|2080-05-21 22:53:55|2042-03-17 05:22:32.978021331|1995-09-05 
+108|-2825|-1578387726|8508401924853850112|907.11|-1412437.77|-4678527618393.424805|false|wind surfing|tom thompson|2018-01-26 03:16:42|2079-01-23 07:03:55.66706647|2060-09-08 +106|-21723|290601612|878|-20465.49|1566976.52|4478891480861.371094|false|industrial engineering|calvin white|2077-05-30 22:04:30|2061-09-29 21:38:16.202014149|2040-10-07 +68|30764|435407142|8045070943673671680|-23282.77|3828837.00|-3478315502065.250000|true|debate|david allen|2025-05-11 07:27:17|2024-01-26 11:46:49.108538087|2041-06-10 +59|-25531|1191238870|3613|-13461.54|-1092506.14|-340655977401.489258|true|wind surfing|xavier xylophone|2035-11-11 03:06:23|2048-04-08 16:13:43.746833506|2001-08-17 +52|-27372|2005560498|-7637494527844343808|27509.37||-4283050885345.431641|true|debate|alice king|2058-07-02 23:26:25|2053-04-22 16:17:40.18639872|2027-07-06 +98|-17531|1516165279|7691062622443044864|-36668.66|-4406594.53|-2061696768746.728027|true|nap time|zach johnson|2046-09-30 07:44:28|2050-12-21 02:31:12.227479202|2016-06-07 +83|17503|-2022383454||3336.91|1474693.58|-2286393982365.453125|true|industrial engineering|gabriella underhill|2061-11-20 02:36:04|2022-10-12 03:47:56.597404938|2089-08-02 +17|-16622|1592467112|8560526613401714688|-27329.68|1226927.48|-232640950614.637695|false|debate|yuri thompson|2024-05-30 02:53:16|2015-09-09 23:54:29.708837580|2085-12-17 +-25|25967|1283898734|2262|24968.04|3262090.00|4353073330160.626953|true||irene nixon|2073-12-17 12:27:16|2031-09-14 09:17:05.348520933|2000-03-19 +62|-2559|1572563948|-8359839265974165504|-33838.90|-4109950.64|-3713793513165.875000|false|joggying|zach laertes|2014-01-12 21:09:50|1970-01-01 00:00:00.991169007|2038-11-29 +97|-21388|1679381813|-9105358806324035584|34430.14|4484140.95|1916514605589.583008||values clariffication|rachel ovid|2065-05-02 04:11:48|2077-12-27 17:24:10.329858586|1989-12-16 +-111|19862|541118710|8415171956168417280||407180.01|-4745445588033.021484|false|wind surfing|calvin van buren|2057-11-28 00:27:38|2057-07-27 01:50:59.230905693|2029-12-24 +102|-25939|-1106469823|255|-41132.62|-4325896.28|-2743119448924.531250|false|forestry|wendy thompson|2054-02-05 14:03:39|2076-11-22 01:15:27.802163435|2050-04-05 +-48|12358|447426619|-8536369662934401024|10900.76|-2464556.18|2596086249181.297852|true|geology|alice quirinius|2067-04-30 18:40:19|2068-10-21 11:18:10.630254740|1976-12-14 +-98|-15526|-858439361|-8940944155843461120|-14413.34|-1088581.16|-3114508048729.405273|true|biology||2055-12-16 19:34:27|2013-10-29 15:37:53.839949676| +103||1447462863|7384150968511315968|-33050.99|2251130.90|89779906883.365234|true|education|tom nixon|2049-07-01 15:35:57|2075-11-25 09:04:53.147016081|1990-04-20 +100|-11048|1807358029|263|-26675.46|-752421.26|-4890530939167.305664|true|history|calvin brown|2020-09-29 16:13:24|2048-06-07 18:52:55.422743536|2070-12-23 +58|6675|-1802746460|1217|-27085.73|4833180.32|-4240406364923.501465|true|religion|mike robinson|2053-04-02 08:27:04|2072-01-18 15:26:17.374305609|2026-02-01 +55|-12295|1861276585|-9091113592821972992|-37828.36|-676599.33|3467837381964.145508|true|forestry|alice underhill|2072-06-22 14:20:58|2032-03-07 17:27:47.232386485|1970-10-08 +-104|-21025|-1565785026|8666178591503564800|-19191.27|-363010.37|-2215881470668.960938|true|wind surfing|nick garcia|2053-06-28 08:16:37|2077-05-30 20:37:52.294863246|2019-03-28 +60|31396|636901402|3512|45564.68|2216449.54|2498973745429.750977|false|american history|bob quirinius|2062-12-15 10:55:15|2055-11-30 15:08:05.855950992|2074-01-29 
+|-16518|-308225568|8472429318602268672|-27618.83|1397035.62|-1426238002392.174805|true|zync studies|yuri white|2058-05-30 12:42:49|2014-01-04 07:40:44.718673628|1971-03-10 +38|28413|-469870330|7061809776248545280|5038.36|4899225.93||true|american history|wendy van buren|2052-03-21 12:31:39|2041-01-06 14:03:47.179206937|2074-03-12 +108|-5135|-2042831105|6969599299897163776|-40691.70|-1564446.85|-4298929701839.924316|false|philosophy||2033-10-13 05:36:52|2014-06-18 16:42:39.89307305|2079-04-19 +-34|11168|1846184880|-7535857766791577600|-12097.91|296884.75|189835086132.674805|false|wind surfing|zach ellison|2022-01-21 10:58:22|2014-08-14 21:48:41.258440751|2058-03-24 +42|-7323|1625751062|3724|19175.45|-4645302.81||true|yard duty|jessica nixon|2046-12-23 10:11:15|2040-05-12 13:49:04.312695207|2067-12-19 +-101|20084|343362793|2786|-5759.71|-3710545.39|-747309143932.177734|false|xylophone band|nick johnson|2055-11-22 06:32:07|2032-12-27 00:39:24.505679054|2013-11-15 +33|-18263|735732067|1948|38213.05|2668590.35|915569735455.541016|true|quiet hour|rachel ovid|2018-03-02 05:25:36|2063-07-27 10:41:50.110054963|2046-11-16 +-54|-21795|-1017629298|-7328087811698909184|-20010.24|-1103528.09|171257816833.722656|true|geology|wendy brown|2022-12-25 05:14:20|2069-06-10 23:30:18.814929160|2078-03-20 +16|-14888|1190302173|7753882935005880320|-13338.98|-3830938.65|-16966857031.200195|false|kindergarten|priscilla ichabod|2079-09-20 10:22:11|2066-05-11 11:44:44.967158232|2058-05-02 +-104|22213|-1818456584|-8384695077413412864|43493.35|-828620.69|2303273645231.165039|true|nap time|fred garcia||2058-03-20 03:18:32.441094681|2005-09-08 +76|-26087|-295186284|-9075302542655684608|-16572.92|2456302.57|1695145412960.172852|true|mathematics|rachel thompson|2067-11-29 10:49:14|2023-02-24 06:11:31.127903962|1970-10-04 +-105|-4953|315973457|-8844949406948671488|-9860.94|998897.88|-4775905812492.879883|false|education|fred davidson|2025-09-30 18:11:16|2025-06-11 16:43:58.941547150|1999-12-25 +-127|16110|904604938|8337549596011102208|2848.64|-3418610.14|-2009642318242.228516|true|debate|mike steinbeck|2023-04-26 20:45:39|2048-06-21 02:28:33.302451076|2068-01-05 +-109|31986|-1218581850|-7624057992767782912|-9642.07||4524736841293.623047|true|chemistry|ethan ovid|2046-01-30 17:53:58|2064-06-05 07:51:39.21714322|2097-09-03 +116|-32119|-2144241640|9073672806863790080|-7691.49|-4810810.06|-3529351191949.139648|true|zync studies|quinn ichabod|2025-09-14 05:57:47|2052-08-21 07:01:03.126893909|2101-08-31 +-69|-28864|1766517223|7948803266578161664|-33985.35|3550700.26|4086092318223.570312|false|debate||2071-01-12 01:01:39|2059-09-08 02:39:24.970547582|1989-11-05 +-13|12814|881673558|7566273236152721408|-4594.45|-3286472.17|423483325042.503906|false|linguistics|nick underhill|2023-06-25 22:14:52||2064-02-13 +4|-10513|536876888|7062382339142156288|13262.02|4083053.68||false||wendy young|2073-07-01 02:10:21|2068-11-13 07:56:42.534738020|2026-07-12 +-109|-20188|-1289665817|7909645665163804672|42529.03|2938682.72|4492997493634.832031|false|values clariffication||2036-02-03 11:35:01|2016-09-22 08:56:11.169028039|2065-08-11 +-33|20120|-340462064|-8086577583338061824|30435.53|4916056.68|2409115470740.985352|true|zync studies|calvin quirinius|2052-08-12 00:26:09|2019-08-25 23:11:02.671444276|1991-04-05 +-82|-23630|1425456189|-8122639684164501504|13589.93|-4405035.26|3998360085714.693359|true|geology||2055-05-29 17:37:08||2006-11-30 +-18|-19786|-1642207005|-8117838333114212352|17411.05||1866266907288.434570|false|wind surfing|sarah 
garcia|2028-01-29 13:44:26|2039-09-01 15:14:46.549821420|1975-12-21 +-83|23910|1376818328|336|-27132.88|1204353.22|2806463824394.356445|true|undecided|sarah ellison|2080-10-30 17:04:15|2065-12-02 08:54:56.954538295|2014-02-02 +111|32231|-1919939921|2320|-6568.58|1215239.64|-1869700480167.893066|true|kindergarten|xavier van buren||2016-07-10 10:53:59.626066496|2021-05-21 +-86|12915|-1434562279|-8731068123910987776|-8835.22|-1335137.34|-1538575730095.495117|false|american history||2061-12-03 00:29:54|2072-08-27 13:08:21.852423838|2016-07-12 +-107|-13|1070989126|1906|-12761.32|3064168.48|-3462773458406.651367|true||fred carson|2018-12-15 02:28:19|2071-03-20 11:13:17.416475043|2018-06-15 +95|-20789|1028204648|-7838598833900584960|1716.08|-2554550.12|4714480698801.324219|true|geology|bob falkner|2045-07-31 00:13:55|2059-03-29 14:57:28.222857121|2099-08-04 +-80|-8852|477857533|1165|37201.95||-1261362806924.886230|true|forestry|jessica ovid|2062-04-27 20:14:10||2064-08-16 +|-14783|-224865887|-7456869587112255488|5122.61|-2102976.63|1531516008788.393555|false|values clariffication|fred nixon|2048-10-12 23:56:55|2026-06-07 14:49:02.306537960|2089-05-15 +-15|17581|1142098316|2013|6003.12|-1151629.14|4348345225726.326172|false|history|yuri van buren|2024-12-02 01:39:53|2073-03-07 20:51:20.326376818|2050-01-09 +-8|-24885|-332125121|7333512171174223872|26535.92|2399386.54|-2960944140681.114258|false|opthamology|ethan white|2079-02-15 19:05:52||2087-01-27 +-89|-10935|1240875512|-7571293705217687552|21259.00|-629766.26|-1636595770961.833008|false|american history|david quirinius|2033-01-07 17:02:27|2063-10-15 04:11:02.388323396|2096-08-29 +-19|6306|-590374062|-8523434203900674048|28590.02|1265094.91|3833331173378.199219|true|undecided|victor thompson|2056-03-02 15:07:50|2047-02-04 15:17:32.647082767|1990-11-23 +-39|-16680|492968645|8099215208813903872|11064.77|750795.33|-3997116358934.107422|false|zync studies|sarah underhill|2032-12-13 09:50:46|2029-10-11 21:31:15.496921510|2001-01-25 +-48|-30157|-1635301453|9040958359122640896|42146.27|-1984020.04|-3919914678779.932129|false|yard duty|luke ichabod|2050-05-21 14:47:18|2072-06-27 06:27:52.850736616|2012-07-08 +118|-21389|1319589591|-7356685674003021824|48287.81|4413852.26|4754891500688.814453|false|philosophy|||2031-07-02 15:06:48.138419705|2092-03-31 +-118|13138|-1652600376|2072|31787.11|4413364.49||false|opthamology|victor zipper||2058-07-13 04:38:16.443396189|1996-08-31 +32|29023|-856843296|2073|48554.24|-2764932.99|4532387406205.238281|false|mathematics|jessica polk|2066-09-16 03:57:39|2065-06-11 16:20:38.164287829|2011-01-20 +-31|-9496|915505006|871|37794.69|4782774.03|3414437080001.526367|false|geology|luke hernandez|2061-05-20 02:26:59|2068-01-21 09:27:30.854734737|2023-12-26 +22|-6460|1179528290|-7551394356730339328|-23041.73|-4296206.08|4962408770099.849609|true|xylophone band|luke falkner|2058-10-05 19:19:16||2021-12-19 +83|-29675|-113253627|-8172827216441573376|-1794.84|-317220.62|688329351482.579102|true|biology|david robinson|2039-03-30 18:42:28|2020-02-03 05:20:18.744684653|2050-09-20 +88|6761|-1878838836|-8082793390939193344|36116.48|1298897.65|1744469246864.212891|false|chemistry|tom ellison|2030-01-25 11:28:31|2016-05-22 14:39:37.64576689|2044-06-20 +-49|-12588|2065408093|8854495099223375872|25810.56||-3371566685461.750977|false|opthamology|priscilla polk|2029-08-07 14:29:01|2048-03-29 02:01:01.562257980|2000-08-18 +-14|28008|-122391516|-8358130693961195520|-33737.35|4303885.61|207192707064.258789|true|joggying|alice garcia|2025-11-21 
05:44:38|2029-03-08 07:36:34.632835852|2105-01-06 +-52|27527|-1240048334|9050032047355125760|-41056.39|-2126298.83|3424231918266.365234|false|education|sarah ellison|2055-05-29 10:01:01|2053-06-19 04:48:57.10910076|2004-06-03 +84||1813010930|-7162299524557471744|-4970.18|910733.08|-4747435156455.921875|false|history|quinn ovid|2056-11-24 23:44:36|2054-10-30 00:49:45.287291218|2070-02-03 +28|-21506|-682333536|809|-13948.95|2291401.51|33266358053.083008|false|geology|ulysses steinbeck|2080-06-19 15:17:59||1984-12-28 +4||-759911896|-8946656952763777024|-4183.99|-4038048.77|-3167988150233.824707|false|opthamology|katie white|2061-09-28 17:08:58|2015-09-01 23:29:55.301806098|2032-01-18 +-100|13491|-1369302744|1053|-10809.39|4440019.40|-1117844143988.975098||values clariffication|quinn thompson|2023-03-09 21:06:33|2027-12-13 08:22:59.973086123|2015-09-29 +4||765084282|482|-24177.64|-3926632.94|-128454528929.609375||biology|ulysses johnson|2039-12-01 00:28:17|2047-02-12 11:40:45.586393217|2090-06-30 +126|-16307|470993066|-6968892545529896960|2416.88|-767525.41|-4599746514511.089844|true|industrial engineering|luke garcia||2068-09-30 15:34:57.912726771|2043-08-23 +-3|24819|2070969353|203|49013.15|3648039.90|-2530166692170.396973|false|religion|zach davidson|2028-03-11 20:18:02|2023-01-31 06:26:26.819065415|2026-12-24 +-79|-19571|-296195507|1614|1339.61|472805.29|-4628165921333.362305|false|geology|victor thompson|2025-05-07 23:33:22|2076-12-14 19:01:33.673146196|2040-05-30 +88|7057|6266567|-8593419958317056000|20993.37|3470920.76|2907965923466.875000|false|opthamology|jessica van buren|2026-04-10 03:58:42|2027-05-24 20:24:57.709957719|1985-01-18 +42|-562|604460005|8190967051000659968|10778.57|1505668.86|-4298212148464.983887|false|american history|priscilla king|2071-01-07 17:48:53|2050-08-28 10:51:32.79079993|2036-07-23 +-5|4536|-1836166334|808|-26348.93|1609583.36|-2450581027868.068359|false|biology|zach ellison|2064-03-18 16:36:24|2040-12-19 14:19:07.29019956|2094-02-23 +31|2855|458910170||4194.56|-2703308.04|-3920065100013.161621|false|geology|holly ellison||2064-10-06 03:04:54.202018334|2058-01-16 +-36|813|2144454927|412|-17894.49|3767875.02|-4076956189315.090820|true|zync studies|irene young|2051-03-22 13:42:47||2069-11-02 +-19|-16002||8656571350884048896|44168.85|4141077.81|2689794991277.487305|true|yard duty|rachel steinbeck|2038-02-19 06:53:09|2052-02-15 19:04:26.991933690|2018-01-25 +-123|25732|2100377172|8769199243315814400|14038.74|191532.01|-3373496595819.993652|false|quiet hour|quinn allen|2080-05-30 20:51:34|2038-06-11 05:22:17.584841494|1997-09-20 +-28|-9278|-1236536142|-8546758906409312256|-18615.91|-385172.08|3145518148645.542969|false|values clariffication|quinn ellison||2050-08-26 21:46:55.776228072|2006-05-04 +90|-27844||8829545979081744384|-7890.36|-4687872.68|-189051209331.019531|true|forestry|holly ellison|2078-01-22 08:05:44|2051-12-30 01:07:06.843556116|2019-10-31 +-33|6756|-1380678829|7545689659010949120|3119.59|1219503.88|1383475996151.766602|true|chemistry|gabriella nixon|2013-11-15 10:53:04|2074-01-13 06:13:44.309707032|2086-07-20 +-27|32063|-412333994|618|18352.29|-2682908.49|-2196296807639.192871|false|xylophone band|mike ichabod|2049-05-17 01:36:05|2016-01-06 04:27:12.937792140|1988-07-21 +115|14089|1583280136|8573305425181941760|-12596.11|-4306107.96|3399085059351.882812|true|forestry|bob carson|2077-10-22 01:28:01|2070-11-22 05:00:18.148513417|2060-02-01 +87|-5837||94|31117.44|4356549.19|3648188962203.846680|false|joggying|quinn nixon|2013-11-03 
06:51:21|2060-06-22 12:16:37.962088375|2101-04-20 +42|6036|960187615|-7266719102957125632|-47893.89|-295854.59|2917455244518.621094|false|topology|calvin zipper||2064-05-28 07:51:38.18135161|2091-10-08 +-57|-16290|1835749815|2772|-2499.18|-2208789.27|-1579859560936.931641|true|joggying|wendy carson|2068-08-26 13:27:51|2021-11-05 06:16:08.956633171|2012-03-06 +34|-2518|1625699061|379|-24309.03|3375470.42|-1718131278688.766113|false|education|ethan johnson|2046-02-23 07:23:16|2040-11-20 23:57:22.271546670|1978-07-29 +69|28358|-1524081566|8302473563519950848|-297.67|-3373283.08|-2009241907914.223145|true|joggying|ethan van buren|2047-08-15 06:48:02|2043-04-13 19:11:51.590383118| +-11|31334|215759857|-7802538500225777664|33944.93|-2034061.98|-3034548052198.289062|false|industrial engineering|sarah steinbeck|2055-02-11 13:24:46|2053-11-27 08:12:32.905325041|2039-02-11 +76|-25155|2009890220|-7273694358642851840|18007.61|-1882990.85|-1586767512900.379883|false|religion|nick underhill|2043-09-05 07:30:29|2039-05-08 08:23:28.744105983|2012-06-13 +-42|-27015|-1024500955|8987827141270880256|6071.06|||false|history|rachel thompson|2080-11-04 06:42:28|2022-05-07 19:23:26.462479493|1990-08-27 +-91|-7300|-1257859205|914|-5866.74|-1809849.07|-1670602067707.562988|true|xylophone band|mike garcia|2057-06-04 13:48:28|2056-12-01 04:07:20.17327137|2090-07-13 +|-13972|1616782308|723|15887.15|-2469559.45|-380705728848.234375|false|american history|jessica quirinius|2057-08-11 06:45:24||2003-06-16 +-69|-10569|-1928197479|-7104310188119834624|26076.44|2535185.38|2312022922718.455078|false|linguistics|calvin falkner|2033-03-03 06:25:26|2015-06-15 18:44:04.445242368|2020-04-30 +-101|12583|998793176|154|-39236.48|2570101.79|-2715354441339.101562|false|history|xavier falkner|2062-06-17 13:37:19|2065-12-13 13:46:25.594101554|1983-02-08 +28|25518|631207613|-8494118409594650624|33125.44|-315524.15|4237441963242.361328|false|study skills|tom garcia|2048-09-29 05:52:03|2066-12-04 01:20:15.241688388|1988-03-29 +-40|-31164|-161884324|3781|-49606.91|1038717.47|155199366725.398438|false|joggying|oscar nixon|2023-10-07 10:39:24|2071-02-16 02:10:47.198374359|2055-05-12 +-124|-5267|574069547|1466|4445.84|3318110.58|3262707195109.015625|true|industrial engineering|tom falkner|2049-01-22 07:20:24|2013-04-19 01:40:57.733559293|2064-01-22 +-28|-20663|-616724730|724|43976.01|-254306.16|-2564468363230.716309|true|industrial engineering|mike brown|2040-06-27 02:12:07|2027-09-24 00:10:50.868223193|2043-01-07 +|2542|-1984079412|-7270034223527993344|42132.29|1872487.12|4239681407336.525391|true|undecided|holly ichabod|2036-02-28 11:43:29|2058-09-07 02:46:35.228498480|2052-03-13 +-62|-25077|1845797092|913|18763.32|622185.07|2205467425785.875977|false|religion|alice zipper|2072-02-01 03:42:53|2054-05-21 06:31:30.994303844|2024-02-23 +64|32309|-605370177|1704|46536.51|-2429187.29|-3869444972623.164062|false|linguistics|priscilla davidson|2047-07-10 07:46:00|2047-03-10 19:56:43.16082472|2077-03-07 +-21|2590|-915104901||-19995.58|2053186.77|957236234850.747070|false|wind surfing|yuri carson|2015-06-30 01:25:11|2070-03-17 03:33:19.30902666|2036-02-16 +-75|-12782|1136548971|-7883252982752665600|-814.76|2576885.92|2555233457696.850586|false|geology||2024-05-25 02:43:22|2040-11-17 01:26:44.81314350|2000-03-04 +127|-27312|-505879576|412|39913.52|2317989.11|4849612269862.681641|false|opthamology|holly steinbeck|2048-01-19 12:38:07|2054-11-08 01:30:13.939944047|2000-05-28 
+-28|-25412|-202035134|4024|-4465.40|-1742902.49|4667839539207.853516|true|undecided|katie underhill|2039-09-02 21:29:19|2071-08-10 05:34:49.654645949|2098-09-08 +-26||-935723237|7226360892091416576|-31695.38|2802498.83|2390739186802.050781|true|kindergarten||2051-11-30 06:15:36|2050-03-18 13:45:32.683739800|2052-09-26 +-115|-28939|688547276|-8244657976255889408|36162.80|-4989122.80|-2552765883743.006836|true|xylophone band||2034-11-26 00:25:11||2025-05-13 +-86|-12071|1956887369|2862|1570.24|-4626990.97|3666109195511.175781|false||bob king|2051-01-24 01:38:45|2055-03-11 20:34:57.197479653|2034-02-08 +-67|-1443|-993029335|1521|-46155.68|4524183.68||false|debate|oscar laertes|2058-04-21 07:57:00|2078-08-31 09:56:45.671467607|1982-04-01 +7|7530|-549167249||20406.25|1525395.61|-1198863806969.718750|false|history|holly nixon|2043-01-28 08:16:12|2045-05-21 07:53:06.829103969|2030-09-22 +3|7603|2038381675|7386087924003676160|-21580.07|933815.29|-3828528440938.576660|false|linguistics|quinn ichabod|2026-12-02 16:54:36|2044-04-29 02:35:27.537827490|2086-05-11 +-69|-13474|-373038706|3907|-18880.83|-939028.09|117666480427.907227|false|undecided|priscilla ovid|2067-02-17 21:38:22||2075-08-06 +92|2833|-2119724898|7818464507324121088|30292.00|884488.54|871664464870.037109|false|joggying|holly young|2065-11-24 15:01:57|2071-09-02 12:05:40.306146425|2041-05-01 +21|30075|-1464514590|-8293833565967810560|-36263.23|-1474035.29|-4690640135324.102539|false|quiet hour|nick garcia|2033-09-22 16:37:28|2072-01-29 18:50:14.473666434|1994-05-25 +119|-12571|-779743333|-7892780594910871552|4572.65|995018.67|-425783093743.490234|true|joggying|ethan white|2061-07-26 18:52:34|2071-07-05 00:55:22.981130585|2029-09-01 +-83||1920662116|1509|33314.97|-2730529.51|84511300621.094727||wind surfing|tom hernandez|2050-03-09 05:14:35|2078-12-03 09:09:20.319905333|2029-09-01 +85|-13713||7592440105065308160|42570.35|4483692.57|-2934238933405.062500|false|xylophone band|jessica quirinius|2030-03-23 21:15:46|2045-01-19 02:06:48.899240157|2103-08-27 +53|17436|-722294882|-7600138468036386816|36893.19|-4542207.94|-1743688633337.790039|false|education|quinn quirinius|2017-05-28 14:36:27|2066-02-21 06:53:29.204553365|2009-09-12 +3|10727|-1849091666|9064847977742032896|17555.77|-4634541.04|2527955410135.795898|true|nap time|oscar carson|2048-03-24 17:24:21|1970-01-01 00:00:00.846186492|2033-08-07 +9|20766|-181122344|-8379964450833367040|-35831.96|2510460.11|3065070467778.455078|true|geology|irene davidson|2050-02-22 15:05:52|2027-10-08 09:45:46.728679053|2050-06-14 +113|-3165|-90029636|7217123582035116032|12022.56|3658068.63|-2382124601444.530762|true|linguistics|wendy laertes|2033-11-09 23:52:55||2070-09-29 +93|24677|-472303419|-7879864376629567488||-4715822.32|3836587003422.593750|false|linguistics|rachel garcia|2020-12-08 01:26:41|2018-07-13 07:36:49.359511410|2067-01-14 +9|-3885|-906545548|2878|-38407.73|-1928000.84|-2800032052132.195312|false|zync studies|xavier falkner|2056-02-23 09:06:30|2047-05-31 06:42:08.683378784|2016-11-13 +-37|23750|-1749841790|2412|46750.89|-2458475.76|4439101915043.935547|false|zync studies|katie thompson|2073-10-11 11:47:19|2026-01-28 22:23:58.98308516|2092-06-27 +-91|-29218|-1326025787|524|42679.72|-662744.93|310684347775.804688|true|chemistry|calvin garcia|2068-11-17 02:45:29|2059-01-25 08:18:48.682643621|1971-12-01 +75|21407|44595790|784|-39388.69|-4752379.35|-4679521339883.549805|false|debate|xavier brown|2059-07-30 11:32:09|2029-01-31 01:58:51.459910049|2007-05-08 
+35|4397|-117723745|-7046180371529351168|-22128.92|-1882093.86|604464981663.425781|true|quiet hour|oscar white|2014-11-01 18:19:21|2080-04-14 06:26:44.785796402|2054-06-03 +-26|-30730|-207899360|471|22281.44|-2302502.73|-1952325542067.043945|true|debate|tom ellison|2041-01-14 15:00:44|2080-03-13 16:08:00.809220690|2063-04-29 +-81|-7564|1131663263|612|19632.85|3876730.76|-3157294779103.096680|true|study skills|david laertes|2062-12-12 19:12:58|2035-06-20 23:05:28.501460278| +-98|21941|-1665164127|8368012468775608320|528.05|3412429.91|-950235709930.815430|true|industrial engineering|victor polk|2043-04-29 14:31:44||2060-04-22 +75|-716|1784291853|-7547245548870025216|33682.36|-2277813.00|-2651511947479.434570|true|history|yuri falkner|2060-09-06 05:02:04|2015-12-10 05:12:43.991653609|1970-04-26 +1|8466|-901079162|3841|26403.70|-2787173.70|-4078808993300.313477|false|zync studies|fred ovid|2042-01-27 06:10:24|2065-09-04 01:31:40.42530548| +-97|913|-1502924486|8752150411997356032|31964.76||4205989218534.185547|true|opthamology|rachel nixon|2027-03-17 19:37:19|2028-03-14 08:43:52.398149158|2064-03-06 +-34|15015|1637295757|-8623965248051789824|-13910.96|-1899040.07||false|linguistics||2074-06-10 07:25:52|2064-06-11 04:29:32.55418236|2094-06-14 +-33|20036|1880017800|7637152193832886272|-13101.15|-4590457.77|-1226073411038.548340||quiet hour|bob nixon|2055-07-25 23:02:17|2072-05-17 10:12:38.513326012|2012-07-07 +22|-16940|-595769210|9191943992860327936|42065.10|353607.21|-3757504944023.821777|false||bob polk|2049-10-28 12:03:35|2028-12-24 08:45:04.462025798|2099-03-24 +-81|25454|-123529324|2700|-49851.20|-199346.24|3039471429849.208008|true|american history|luke underhill|2041-05-03 12:44:35|2062-05-13 13:04:05.633293665|2016-04-11 +111|-6349|1950882901|9180098147855769600|8936.06|4586140.88|-2562228399414.543457||undecided|xavier allen|2047-04-04 14:41:47|2029-10-19 11:17:14.241599813|2076-10-10 +32|-6564|1928365430|1775|16609.42|3708243.51|-1030172091977.021973|false|chemistry|alice thompson|2057-06-03 18:50:23|2016-01-12 02:37:54.734293795|2010-10-15 +87|5550|996831203|797|-32213.23|-348044.95|2004890357846.349609|false||quinn quirinius|2025-05-25 18:20:23|2065-11-02 11:48:50.631850931| +48|-9943|270090617|-7773957003968675840|39329.34|4135595.75|-243786022935.737305||wind surfing|holly laertes|2030-12-19 19:08:33|2023-04-14 22:34:26.707421905|2043-08-05 +-115|28301|1317690178|-8660149447361404928|-37823.29|-2814333.34|-3512730030774.058594|false|geology|holly miller|2029-03-26 00:28:37|2039-02-23 12:54:55.42365758|2076-09-26 +-46|18350|-1531040609|8641221723991433216|23823.69|-427642.08|2819404917896.939453|true|yard duty|rachel davidson|2024-07-29 07:41:06|2025-01-31 13:56:58.481985366|2067-01-10 +51|-13904|1664736741|392|-33079.41|4087589.11|4145813588397.863281|true|chemistry|jessica king|2035-02-25 00:23:44|2052-01-19 21:04:20.398663046|2079-08-01 +19|-26613|-971615370||33112.52|4554924.96|-4303832304648.800293|true|chemistry|calvin allen|2073-04-14 09:13:06|2039-12-03 11:31:50.851455297|2009-09-29 +-89|29333|-1124028213|8489735221193138176|27673.25|-4285255.26|4961373696863.767578|false|linguistics|katie ellison|2072-02-05 07:19:52|2067-07-12 01:46:05.391290184|2018-03-24 +0|-12203|372099650|7944741547145502720|-6724.11|-3694995.69|-630159357028.894531|true|undecided|luke robinson|2033-06-16 06:37:14|2014-10-03 05:32:19.137455217|2068-06-05 +111|18910|780859673|6933731240564056064|-44662.14|-1927984.24|-3491883806228.379883|true|biology|katie van buren|2038-08-20 15:28:40|2064-04-16 
22:33:47.576062785|1970-06-03 +57|1280|-1359838019|9083704659251798016|20916.87|-1497681.60|2846930959096.063477|true|biology|quinn miller|2053-01-28 18:41:42|2060-07-25 07:46:20.708072689|2002-05-27 +122|9177|128430191|-9084940280061485056|-42499.27|3229184.24|-4017461708820.384766|true|values clariffication|ulysses ovid|2034-11-06 04:21:49|2034-07-30 16:22:19.644823674|2023-10-05 +-78|-10532|-318380015|8222714144797368320|-11740.33|-2082364.30|-2110282626871.757812|false|xylophone band|katie ichabod|2046-04-25 00:51:33|| +-82|-24320|1550375386|8817665768680906752||-953044.44|-2273400242763.017090|false|geology|sarah carson|2048-08-22 04:37:46||2086-10-16 +113|-27401|1012230484|1995|40659.62|-4935987.77|-1008868027448.919434|true|debate|sarah polk|2062-01-02 11:36:14||2080-04-14 +-111|30736||1561|-31482.56|-1580110.12|-1566816988300.167969|true|zync studies|quinn quirinius|2060-03-07 21:51:32|2061-11-09 12:07:37.596342844|2026-11-23 +-5|-4037|-379643543|2485|-9312.35|3850072.85|2057210782083.452148|false|industrial engineering|david quirinius||2061-11-11 05:38:01.34481671|2021-04-19 +27|-13623|505902480|1826|-42341.81||2681464429083.355469|true|chemistry|ulysses garcia|2074-12-19 04:00:03|2049-02-17 19:06:31.903289514|2013-01-23 +|14234|-1026746699|845|35740.11|4705168.38|-4722254262111.391602|false|quiet hour|rachel hernandez|2019-03-25 04:02:54|2080-10-07 11:37:55.957012102|2004-08-13 +-58|3325|1665724041|8376440110255243264|46586.97|1120362.22|3853942610392.876953|false|history|luke laertes|2029-12-12 07:11:57||2004-04-01 +-17|22289|712816880|9075404705968840704|-15320.85|-3930645.10|-4500316102425.934570|false|linguistics|alice king|2062-09-20 07:45:00|2021-06-10 09:55:52.478946811|1994-01-27 +-28|-11740|1566607834|-8379109122834997248|21449.12|-4905990.36|-1952272677926.849609|true|debate|david brown|2018-12-26 15:21:05|2055-07-04 00:03:47.54540038|2058-11-22 +-102|30420|980732494|-6938706403992854528||4290144.13|3973620252675.828125|false|nap time|holly polk|2013-06-14 16:30:17|2079-04-01 01:20:33.477066908|1973-06-12 +-27|10473|1805139501|961|540.66|1846904.66||false|study skills|katie thompson|2080-12-15 12:30:07|2018-10-01 01:35:46.827604858|2065-10-01 +93||-1402821064|1422|-32181.01|1471767.66|3888068884720.255859|false|forestry|luke brown|2055-04-11 22:16:15|2023-10-05 09:42:54.182435030|2001-11-08 +-72|-31033|-694520014|9149216169284091904|-21438.32|592620.51|-868405318561.400391|false||quinn steinbeck|2038-03-12 13:12:37|2019-08-02 08:13:35.567323306|2015-09-08 +-83|-21268|962091264|2752|-9102.58|-1920670.21||false|forestry|tom polk|2048-07-06 23:16:50||2103-08-02 +-91|5014|-1561738723|2255|11659.02|-1238767.70|2257301642704.199219|true|opthamology|fred underhill|2034-05-29 03:12:18|2056-10-14 08:06:18.341166711|1973-02-06 +-46|24913|906074599|-9080568167841226752|-1633.77|2280305.65|3418787511905.457031|||wendy nixon|2054-11-07 20:23:51|2069-03-08 13:59:21.935346573|2027-06-01 +-55||-990781312|1046|-17223.20|-2914331.70|-3061715672539.690918|true|american history|victor xylophone|2025-06-24 19:34:32|2029-05-31 09:16:06.592040235|2067-06-22 +85|14176|1582537271|7926898770090491904|-48480.35|-4206386.08|-3830188857046.366211|false|kindergarten|jessica garcia|2027-03-21 01:29:39|2050-06-01 01:40:27.460760962|2094-01-29 +5|26915|-158848747|7784489776013295616|25599.75|3264338.39|1398225707031.031250|false|geology|sarah ellison|2081-02-08 13:31:19|2016-12-22 00:48:27.635611363|2019-01-04 
+48|20655|-1345085327|6991316084916879360|5659.91|2971588.36|944624924616.661133|true|philosophy|bob johnson|2042-07-11 18:36:57|2037-12-17 04:00:52.301645045|2015-05-25 +-60|-23750|747122546|1566|-32617.06|3930932.70|904359634710.800781|true|american history|david thompson|2049-08-15 13:53:19|2049-05-25 04:35:12.968980434|2054-07-15 +1|19276|1504919241|1671|-35828.00|-1990218.76|1742934975017.433594|true|undecided|calvin miller|2033-10-17 07:51:41|2045-07-16 13:46:26.28793094|2089-07-13 +76|10382|-607667405|-8543982423727128576|15112.99|-1144078.40|1538907131524.070312|true|nap time|ethan young|2039-03-18 13:48:12|2022-05-16 00:50:53.345008930|2072-03-19 +20|5947|-66010816|-8832750849949892608|43254.26|-4684380.49||false|debate|rachel davidson|2022-03-02 20:53:13|2029-05-15 20:54:25.943787316|2051-12-13 +|10083|-1340213051|6963217546192322560|-23734.81||42087741032.439453|false|wind surfing|rachel ovid|2026-01-17 13:10:48|2068-04-09 14:34:20.845374064|2046-04-19 +25|-11129|514833409|236|14759.04|-4445156.14|-2471323804264.180664|true|chemistry|katie nixon||2062-06-11 00:50:35.200240078|2012-03-25 +-117|21418|-1106685577|7086206629592252416|-20613.82|-4669723.14|1287664454410.020508||undecided|ulysses ovid|2014-05-27 23:10:23|2075-10-09 13:36:21.138709539|2055-09-11 +73|-32208|1075444504|9053187076403060736|-20607.86|-663557.15|3604091262736.055664|false|religion|luke robinson|2064-03-29 13:46:25|2028-09-13 12:25:32.422151087|2001-09-18 +68|-9365|214068706|-8067243114610532352|-40369.97|-372273.18|-4679196929122.006836|true|american history|bob thompson||2016-06-02 13:49:05.762346775|2031-11-23 +38|29288|-667383951|1751|-40969.80|352106.97|-3590646924755.134277|false|religion|alice white|2021-01-31 08:56:35|2074-02-24 20:30:17.797682689|2054-10-30 +73|1077|-336625622|2502|34593.52|-3380008.10|4861797358093.511719|true|linguistics|yuri underhill|2031-10-04 05:10:58|2020-09-16 21:07:49.9039871|2000-08-20 +86|-8927|-1817096156|294|18420.41|-718931.02|-812599933568.947266|false|topology|yuri miller|2030-03-08 13:49:25|2059-08-20 13:04:07.326107637|2026-09-01 +46|-26138|-371779520|7892281003266408448|8622.19|4294576.25|-2935717014226.568359|true|education|fred xylophone|2025-08-10 03:36:42|2058-08-31 20:49:43.13681707|2017-12-24 +125|7954||8577096957495025664|-40074.79|-3740147.62|1353404175657.040039|false|education|sarah garcia|2020-04-24 05:38:29|2059-09-29 14:12:16.157602035| +|23725|-45460011|-8665764757143658496|-10058.15|3111392.39|4140724331387.878906|false|wind surfing|mike nixon|2022-12-19 01:07:38|2077-04-10 05:41:52.802703873|2039-07-14 +95|-25151|-1770250407|2855|-8224.46|-1232102.28|1061929942085.519531|true|education|victor davidson|2024-08-05 07:04:05|2076-04-22 18:20:42.949891304|2082-07-08 +-92|22075|-170643477|2811|-6333.87|-2996672.74|-4596379165591.137695|false|education|oscar johnson|2075-03-08 07:27:49|2031-01-22 04:35:56.338245755|2001-02-13 +-107|15530|1028092807|8785153741735616512|23063.22|4917736.71|-1387735872479.432129|false|study skills|ulysses quirinius|2051-06-21 00:48:20|2064-05-26 10:29:04.116652584|2001-03-24 +-35|-4509||1726|-36045.69|-957009.39|-1436845214707.389648|true|industrial engineering|katie ichabod|2061-12-03 22:11:19|2037-09-03 21:28:18.120030510|1976-03-25 +10|8243|1430614653|7186401810812059648|21430.58|-4117132.25|4273903933165.144531||values clariffication|david brown|2080-09-06 01:51:56|2067-02-25 00:06:03.464200151|2008-03-31 +-100|-18358|390124976|-7603569103205916672|9320.42|820509.76|-2378674120573.869141|true|philosophy|gabriella 
laertes|2054-02-15 22:48:03|2023-07-16 09:03:23.416142255|1987-01-17 +-34|-12896|-396852483|4018|14230.43|1948755.04|996493613976.418945|true|american history|jessica nixon|2035-11-08 16:07:43|2042-02-21 13:24:08.768185439|2024-08-27 +-79|-3883|-519978947|3566|-25444.08|-4338469.48|-2225952289175.303711|true|biology|bob van buren||2053-09-12 14:38:23.627646057|2061-08-19 +38|11067|257821327|2725||2123890.76|-4311579608982.931641|true|quiet hour|bob ellison|2074-07-08 16:12:25|2028-02-16 07:26:07.314316265|2046-10-04 +-46|9447|-1921909135|1234|43332.86|-1313189.64|3483740651742.017578||debate|ethan hernandez|2053-12-13 02:32:42|2024-05-02 15:41:49.244300741|2012-02-14 +|-31217|-1880877824|346||-4470321.21|-2089255269314.307617|true|zync studies|tom quirinius|2032-11-07 23:12:11|2057-06-05 07:40:56.143268958|1986-12-22 +||866084887|7961515985722605568|-13131.71||-2394101676301.012695|false|wind surfing|david polk|2052-08-30 08:01:09||2052-02-02 +21|32611|482977302|7274777328897802240|-6306.38|1252331.81|-533108289688.186523|false|study skills|jessica robinson|2061-05-19 17:30:01|2079-02-13 03:05:01.151556875|2064-10-20 +-80||581259902|-6933565857643814912|12765.75|-3696105.78||false|wind surfing|yuri ellison|2032-01-12 17:09:47|2026-12-04 01:38:04.606186772|2046-12-26 +62|-776|-409404534|-8330233444291084288|-13235.88|-4942351.80|1478986789735.259766|false|wind surfing|luke nixon||2015-09-27 04:55:14.418545051|2079-08-04 +-15|7988|1969650228|34|49433.36|-1454704.21|3304426642736.508789|false|debate|bob white|2032-05-25 11:24:01|2050-08-06 03:07:56.829882190|2011-02-25 +-100|-19374|-337073639|7080269176324218880|9636.84|4955437.13|-1605703810903.698242|true|joggying|priscilla nixon|2016-06-18 01:36:41|2076-06-24 17:32:46.629540610|2051-08-11 +31|11050|-1862095575|2941||4773173.81|-4916009895587.691406|false||tom ichabod|2026-02-05 06:06:27|2033-11-01 09:06:22.861384004|2064-04-06 +42|29954|-297664578|9117063974299148288|-36654.25|1809785.84|-2990108911038.724609|true|linguistics|quinn zipper|2063-01-04 13:11:49|2032-04-05 17:24:19.790913068|2082-02-07 +36|-1307|-603273425|-6917607783359897600|20495.55|-717022.00|401293377521.448242|false|xylophone band|ethan steinbeck|2043-08-16 10:35:12|2074-08-29 01:50:39.79906002| +-47|20307|-1409508377|-8566940231897874432|-23112.40||-4906953690985.069336|false|american history|katie laertes|2018-04-24 10:05:45||2070-04-05 +-27|16664|-1817938378|-8710298418608619520|-19481.20|2004949.48|-2008363741307.021973|false|philosophy|sarah laertes|2038-09-29 23:53:07|1970-01-01 00:00:00.626944877|2041-07-12 +-31|20391||1520|39315.23|4922532.64|-1940382804363.878418|false|industrial engineering|priscilla brown|2045-10-19 14:00:02|2047-12-07 08:51:00.336787227|1988-07-12 +113|-21146|-800799595|3728|-6051.92||4288442203475.107422|false|history|luke ovid|2042-04-07 13:12:41|2017-11-07 00:47:02.498508838|1980-07-10 +127|-26526|-938112972|-8835408234247168000|-5042.40|-639514.18|2253781196684.378906|false|topology|rachel polk|2059-06-15 22:27:11|2078-04-15 00:06:23.865331718|2002-01-27 +47|3374|527598540|7705445437881278464|5750.75|265215.83|-3997464183110.714844|false|topology|luke ovid||2055-04-30 05:00:45.288293151|2038-08-04 +-57|7826|987734049|6926925215281774592|4092.06|-4043613.94|1825916166900.114258|false|wind surfing|alice hernandez|2066-02-26 20:09:56||1994-01-31 +30|-4159|-1054609414|835|-24688.28|-3625707.02|-1643333177209.917969|false|biology||||2097-09-12 +23|1326|-199587670||23788.91|-602696.25|3307871238283.475586|false||priscilla miller|2017-03-07 
08:16:25|2052-04-04 23:29:00.786514521|2091-03-28 +59|3270|1434588588|3232|1480.61|743880.56|2593124571056.222656|true|wind surfing|nick allen|2056-12-31 22:55:26|2076-06-29 19:41:36.482632437|2051-05-15 +66|3245|-300717684|-7840338174858199040|40071.10|-905100.40|-4829089800174.344727|false|wind surfing|sarah ovid|2056-03-09 19:09:50|1970-01-01 00:00:00.59977853|2047-10-04 +85|6074|-540401598|7748799008146366464|20761.91|-4916297.37|2180818539557.698242||joggying|david brown|2024-11-21 14:50:04|2028-01-24 01:00:27.143579402|1970-07-16 +38|-7948|1987336880|7410096605330227200|18427.96|1858816.32|2290676735205.039062|false|topology|fred ichabod|2052-07-29 14:33:52||2090-05-17 +-127|11451|316438994|188|6220.87|2395035.62|1762390956829.769531|false|american history|sarah white|2035-12-04 05:59:54|2037-10-21 10:24:49.395474612|1976-11-08 +-49|19041|-1464762852||-9900.51||4912867697965.523438|false|biology|bob garcia|2080-03-12 06:31:47|2079-09-20 05:14:58.169640521|2008-01-13 +-104|5191|595836061|-7709958788604936192|-17146.91|-3981389.85|1420240216579.884766|true|nap time|priscilla johnson|2018-10-25 09:42:50|2027-12-11 09:57:50.980641227|2019-11-03 +115|-26832|-764412063|-6920172215209426944|17394.03|561210.22|997882374014.622070|false|linguistics|victor falkner|2018-12-21 14:56:23|2074-08-29 04:26:14.412387056|2100-03-23 +-84|-16327|407098216|-9109392978217484288|24409.31|4875980.88|2156284924808.034180|true|undecided|priscilla garcia|2049-08-03 07:20:14|2072-03-18 16:05:07.216021739|2020-01-11 +56|21814|773730574|3608|19617.72|2612031.03|472615987880.900391|true|values clariffication|holly polk|2079-07-31 02:12:53|2033-04-30 22:04:27.178988903|2065-03-04 +109|-5097||-8214462866994339840|14926.06|4194003.54|-3157298893601.385742|false||ulysses falkner|2051-01-12 23:18:22|2056-07-05 00:35:53.936899939|2083-09-24 +31|6900|-604362582|2306|25232.04|-301550.41|289949139351.364258|true|zync studies|gabriella ichabod|2041-03-14 14:44:09|2068-11-06 15:41:10.293161863|2098-08-25 +-122|30119|-2065287410|-7759238919361888256|-48628.97|4508399.10|-1946145505911.474121|true|topology|rachel quirinius|2042-12-15 05:57:04|2057-11-03 01:20:57.262442758|2077-07-27 +-81|32567|-536315467|-8922409715403112448|-49532.02|-2541992.13|-366989953592.735352|true|forestry|luke allen|2059-07-26 01:05:11||2075-01-17 +58|-10247|-186600427|3664|-20024.15|2499207.86|3164512989036.332031|false|history|sarah thompson|2036-09-10 12:17:11|2064-10-23 01:33:56.949763617|2085-10-20 +87|-22431|-1625800024|-9203942396257984512|10700.82|-4871285.36|1078033806441.389648|true|zync studies|fred falkner|2079-08-03 03:53:40|2077-02-22 11:17:37.324498592|2090-11-24 +89|24782|1914993018|8116738401948377088|28477.87|4428379.36|-3581726665433.642090|true|linguistics|yuri laertes|2042-05-04 19:59:28|2015-11-03 09:21:31.868284245| +-19|-25973|-1900369503|1791|38658.65|3263702.68|-971581426672.542969|false|history|gabriella thompson|2019-12-18 15:34:18|2059-11-10 14:30:30.756815026|2094-09-03 +112|-5635|-4393552|-7419068456205385728|-16898.67|-3147119.24|-4071056405979.831543|false|history|rachel ichabod|2020-05-25 05:03:07||2041-02-27 +6|-5946|1366402722|8795069490394882048|-5903.91|-302873.33|-3374663867091.989258|false|quiet hour|gabriella robinson|2053-11-12 16:58:09||2071-08-09 +-115|325|692666133|3043|-22120.22|3867286.85|3593749673935.349609|true|yard duty|mike miller|2030-09-24 22:40:43|2059-05-19 21:39:40.717511206|2054-10-11 +46|-24248|-1871446009|3174|24920.44|-2650736.01|3646290606200.395508|true||david johnson|2021-12-02 
02:13:49|2027-06-25 15:06:55.689498978|2092-05-03 +-92|28558|-1699044525|7625728883085025280|-26098.47|-201231.45|-879898512524.555176|false|philosophy|nick zipper|2039-09-05 02:14:13|2067-05-11 07:05:39.707943033|2082-09-07 +22|8786|-1196808950|-8585134536083660800|36606.56|-1258722.35|3594939606490.008789||philosophy|irene polk|2033-07-09 03:38:52|2070-09-24 03:28:58.997336852|2069-12-08 +6|-30638|-370901197|8558000156325707776|-9840.93|-4341927.96|1094936081745.956055|false|linguistics|katie laertes|2078-08-17 11:00:57|2047-01-10 07:06:46.173146425|2028-12-05 +48||1787826883|-8572949572756774912|-7447.88|-4657467.74|-1120139371327.120117|false|kindergarten|sarah allen|2045-11-09 08:09:15|2041-12-23 20:40:16.590539947|1996-09-10 +-2||1045719941|661|48481.57|-199335.41|1697710053431.365234|false|wind surfing|oscar allen|2077-10-09 07:08:17|2044-08-23 18:37:23.765446063|2039-05-20 +-18|-2636|1852725744|2393||-327510.29||true|opthamology|david garcia|2071-07-30 15:03:51|2049-10-01 11:47:21.417323976|2022-09-06 +108|-8578|1352739140|-7800879252150779904|27366.30||-180060584501.582031|false|history|luke miller|2067-12-30 02:21:40|2029-03-05 23:41:09.158607395|2027-02-24 +70|-30163|2044130430|7534549597202194432|-3684.66|3361124.51|-544906712728.980469|false|debate|luke brown|2015-07-17 03:26:40|2054-07-10 04:48:29.103251644|1981-06-25 +-72||583458404|-7642381493746483200|-41758.86|-432625.76|739045453673.981445|false|xylophone band|fred falkner|2075-02-09 15:25:34|2024-10-11 18:27:50.785894213|2038-05-15 +-31|14335|-2024003241|-7330413050756235264|-20201.82|672555.54|1469464763006.626953|false|topology|||2044-04-09 14:18:23.656514547|2038-02-20 +-44|31242|605946758|7596563216912211968|30099.30|2097061.15|3078551005937.946289|false|chemistry|calvin ellison|2040-08-01 15:36:49|2034-04-12 16:31:19.919073203|1981-01-10 +-115|-5240|-1128317466|3307|-17763.36|-3640432.08|-3634683485380.314453|true|chemistry|ethan young|2014-07-01 14:59:22|2065-10-29 07:42:38.763916292|2102-08-12 +-60|4725|-373541958|2971|-21091.82|3177096.44|2070825437177.284180|false|values clariffication|priscilla quirinius|2030-01-02 10:09:12|2017-02-05 15:11:27.916015499|2098-04-30 +-35|-3427|1533817551|2285|22842.27|434947.31|-233465238088.080078|true|biology|david underhill|2069-11-27 12:11:44|2038-10-25 03:53:58.936213000|2018-10-02 +23|-32383|1293876597|1880|15273.59|1943186.18|1318023503084.724609|true|quiet hour|mike brown|2036-06-15 05:33:48|2035-04-17 20:50:04.917479654|2086-09-08 +15|5972|947846543|4088|-24858.15|-1682574.63|2790323958356.459961|false|religion|quinn garcia|2039-02-08 09:45:49|2051-06-08 01:29:12.240654179|2020-09-22 +18|-10277|1004241194|743|37468.44|90556.54|-1721911019841.009277|false|study skills|calvin hernandez|2017-11-06 20:56:23|2066-11-30 20:25:38.323872312|2087-05-31 +96|-28730|-670925379|-8317591428117274624|45783.32|-4491871.75|1347547267070.333008|true|religion|ethan ovid|2039-04-05 21:26:27|2047-02-15 03:00:04.876322903|2016-12-02 +49|22511|1301997393|8854715632851345408|-7866.49|-4806269.72|4690235057502.966797|false|geology|victor van buren|2077-11-06 21:58:21|2026-07-12 18:24:40.599693692|2023-01-30 +116|21606||7768984605670604800|-37668.86|-4451507.54|1896615693548.895508|true|chemistry|victor steinbeck|2049-10-18 09:39:20|2017-04-25 06:11:37.174630171|2045-09-29 +102|-26461|2114363167|2900|42096.38|273171.55|-2395416839539.450195|false|industrial engineering|calvin falkner|2038-01-28 15:37:14|2021-10-28 12:53:49.438479282|2012-09-25 
+-35||-1614194712|7062605127422894080|10143.00|3278700.42|-8877240475.761719|true|debate|alice laertes|2047-08-19 00:56:41|2041-07-30 06:11:21.601946450|2060-10-26 +30|-23672|-1983567458|7394967727502467072|42769.25|245529.05||true|yard duty|nick laertes|2052-08-02 11:29:42|2048-02-09 16:35:20.385399131|2092-01-30 +60|22378||1781|39850.06|-4048951.22|3015653383409.677734|true|yard duty|gabriella ovid|2054-01-29 18:44:50|2063-07-31 19:03:05.155926850|2006-12-12 +38|-12193|-1770229099|7238339720750948352|10757.18||2824970709025.791016|false|philosophy|alice miller||2058-07-11 09:32:37.491928305|2001-09-05 +|-2287|92777932|1638|-25700.79|-181051.84|4692362804086.371094|true|american history|ulysses davidson|2037-12-15 16:39:08|2075-09-17 23:16:13.795444412|2079-12-10 +93|15158|1475025489||8969.07|-4073690.87|909614012102.934570|true|geology|alice davidson|2074-01-01 16:32:52|2019-06-28 19:19:52.904542868|2048-06-04 +|6899|633813435|-8522878384019169280|-8564.75|4996750.69|755521930470.130859|false|forestry|oscar king|2078-04-19 11:14:16|2029-01-24 18:55:58.605673322| +78|-9398|51376784|-8051587217208967168|42110.77|4237872.52|-480292543988.199219|true|religion|fred young|2043-08-31 12:19:13|2027-11-28 17:42:04.124425023|2043-05-27 +-98|22678|-765102534|-7425160895830573056|-36404.91|-232802.04|70171166399.717773|false|mathematics|oscar king|2061-03-29 17:11:56|2025-01-04 05:18:13.67002718|2070-03-30 +115|12263|-1669848306|7344029858387820544|45009.74|-4145959.26|-1287206623226.708984|false|study skills|holly thompson|2030-07-06 18:59:28||2068-03-31 +-98|-18295|-1391183008|-8013397854633648128|-12751.03|4087534.61|1251848312304.524414|true|values clariffication|ulysses brown|2042-11-01 14:59:40|2050-08-30 06:33:17.144659188|2100-07-17 +59|297|-1706867123|8808467247666241536|-23178.82|1057885.16|4299677631158.154297|false|industrial engineering|calvin allen|2017-05-25 20:58:54||2071-03-31 +-68|-26481|-726879427|-8768744394742235136|-46383.26|1261211.13|-2782916269764.896973|true|xylophone band|calvin young|2030-05-23 20:23:28|2051-07-05 23:08:45.51977303|2013-03-12 +-6|31316|-1011125931|9185458640237641728|-17508.25|-658586.36|1020511437056.228516|false|industrial engineering|zach ichabod||2061-07-05 08:12:13.181218526|2006-12-06 +-51|-20182|-892839693|-7686220526274502656|45425.18|1655396.08|3720881118135.359375|false|yard duty|wendy davidson|2065-08-25 22:34:57|2077-06-29 07:51:23.99926741|2003-09-22 +93|15815|-626484313|-8203075743525806080|1895.94||977607524337.074219|false|nap time|ethan underhill|2066-01-12 20:36:33|2016-07-24 00:19:40.599709326|2040-02-16 +122|-22390|511836073|3462|-13499.91|-2876022.64|1328013667669.125977|true|opthamology|calvin ellison|2038-09-26 01:22:16|2050-10-02 05:27:49.739848630|1997-07-24 +31|10544|2146312499|6964585306125008896|-1730.59|1232330.61|4495354594042.523438|true|nap time|calvin allen|2051-08-10 11:03:28||2099-02-09 +-125||-940504641|3418|-42213.02|1998604.86|-2596755613328.838379|true|chemistry|victor robinson|2069-06-09 00:13:59|2049-07-11 09:28:46.795925700|2066-06-17 +-55|21440|-606214770|3366|-7764.69|150515.54|3713473719274.773438|false|philosophy|holly king|2065-10-25 16:36:46|2058-03-17 19:37:18.893032333|1998-04-11 +|-11123|-1078579367|-7867219225874571264|3544.10|-4440011.92|-1597354768432.488281|true|education|katie nixon|2021-12-20 01:53:24|2016-04-25 20:51:22.134986617|2009-08-13 +14|-12517|-1269216718|8367680396909404160|41509.01|-1881292.71|-2614008444640.525391|true|topology|wendy nixon|2031-06-01 22:05:01|2019-05-30 
05:53:53.829470930|1986-10-09 +-78|-7027|-664856187|7524958388842078208|-49058.42|2299272.82|1193868149375.028320|false|kindergarten|alice xylophone||2038-05-26 15:00:22.242283005|2072-09-03 +-69|4718|-47662800|2897|32798.27|3454639.78|1810203200294.040039|false|philosophy|zach polk|2069-08-29 08:29:09|2014-11-20 05:08:33.941609402|2082-06-01 +-72|-15957|-630900418|8391785334471589888|-49753.88||-3594602463335.583008|true|geology|oscar steinbeck|2034-06-08 17:12:48|2048-09-23 09:12:15.792761284|2039-09-20 +-57|15379|-674478103|-8581979259158929408|24804.72|516411.04|-2087053480555.418457|true|debate|ethan van buren|2039-03-15 22:10:39|2059-02-19 09:59:56.329119790|1980-04-11 +116|-4799|1485934602|587|-36250.77||-3139538476660.561523|false|zync studies|tom garcia|2070-11-21 14:24:06|2077-06-02 22:53:29.588878692|2021-02-13 +5|20183|-2081501748|130|16581.21|2049671.63|162654497074.683594||opthamology|priscilla falkner|2022-12-17 04:17:53|2050-08-15 19:53:57.340023498|1985-05-04 +25|-17429|-300429552|1030|48749.30|891980.69|-51603607081.567383|true|yard duty|calvin johnson|2017-11-20 04:16:35|2055-03-24 21:11:15.57401629|2053-12-19 +45|-31764|89366322|8362046808797306880|-28518.05|2976251.39|2342010012983.273438|false|philosophy|wendy underhill|2015-07-02 23:30:23|2076-07-01 17:36:04.583971136|1999-02-20 +124|3714|-664111469|3691|-30852.26|2707139.90|-4940400326044.844727|true|debate|katie king|2029-04-17 21:35:26|| +-97|4749|859140926|7454632396542074880|46493.77|4790547.07||false|values clariffication|jessica robinson|2072-02-26 01:04:57|2056-06-21 06:38:40.564403087|2042-05-03 +104|-7467|-2111312205|7125231541858205696|9390.22|-4975282.68|-2004428657765.586914|false|nap time|katie nixon|2051-03-30 14:45:15|2039-09-13 12:20:18.915778346|2105-04-10 +51|-31881|1933545427|2580|-38988.06|-4829221.83|3300559907076.631836|false|forestry|victor allen|2064-06-06 09:21:23|2070-06-29 22:13:10.207059753|2066-02-22 +63|17680|-1164833898|2512|18258.24|-1641179.75|752962351021.224609|false|study skills|rachel white|2056-09-13 22:53:52|2044-06-06 09:44:23.311696677|1978-12-09 +-50|29999|-290558484|7061498706968428544|1510.13|-4232212.06|-4574735317597.173828|true|debate|ulysses allen|2037-07-06 08:01:26|2050-06-04 17:30:11.711624931|2005-07-17 +45|-21005|2100839074|-7255686273677328384|34455.77|-1253884.38|4472794135666.830078|true|philosophy|calvin ovid|2070-07-26 14:31:09|2036-04-08 23:28:54.417802561|2044-10-24 +-15|14982|-1079231269|9048002942653710336|-22879.28|4867276.99|-3098102239046.767090|true|american history|victor carson|2059-03-26 12:10:56|2014-08-29 13:16:50.336506060|2012-02-05 +52|19003|1830870769|8868529429494071296|36316.44|2540170.52|76874893588.936523||kindergarten|ulysses robinson|2053-02-19 15:45:55|2062-12-03 14:50:03.474294016|1984-03-16 +103||-1701502632|8815398225009967104|16082.15|3286175.37|3474939567092.790039|false|religion|mike xylophone|2036-09-01 16:20:32|2049-09-25 10:34:17.329238191|2015-12-01 +54|29171|-283378057|7128222874437238784|5784.30|-2487846.11|1273026486246.777344|true|study skills|ulysses xylophone|2065-03-26 18:47:07|2046-10-10 08:53:02.163030900|2065-06-02 +-29|-22000|826143442|8371939471056470016|-9954.17|1165753.75|-3327839964882.955078|false||zach zipper|2025-04-26 13:39:51|2033-12-28 12:16:51.968462869|1997-05-08 +45|-6045|-1562552002|-8335810316927213568|18734.01|-1945504.67|-3344796345914.708984|true|undecided|katie brown|2057-08-05 00:41:39|2022-11-18 17:27:46.282162052|2040-01-17 
+-37|-13426|-876122064|-7144791190333546496|-27443.20|3276942.49||false|zync studies|sarah ellison|2027-09-21 21:34:07|2028-07-26 18:55:38.467639955|2046-09-01 +84|-13038|1107502179||26203.53|1826616.72|52385263076.928711|||oscar brown|2062-03-24 05:55:44|2080-06-08 12:00:45.876270498|2068-05-03 +-37|16654|1652349607||23892.92|1116670.95|2593325712512.423828|true|chemistry|victor young|2079-05-05 06:48:25|2052-09-05 13:46:09.928597565|2077-10-05 +5|30730|-841268868|-7572962089372991488|-40151.14|-600165.15|4768177899583.748047|true|geology|ulysses ellison|2024-10-07 05:46:34|2076-04-07 07:58:46.169024390|1972-09-03 +21|-20657|1503176016|8850055384477401088|-36132.96|3738524.26|2378765399686.458008|false|geology|david nixon|2029-11-13 14:57:22|2018-08-17 10:16:14.518949916|1992-03-07 +107|-15779|1620529246|2626|10150.78|3475802.90|-2294284421075.900391|false|study skills|katie underhill|2037-09-11 08:16:17|2058-04-21 23:50:14.231746612|1999-03-02 +-27|-25112|2069258195|3599|3093.83|3254136.59|523934597183.667969|false|opthamology|nick allen|2080-02-16 07:15:57|2060-05-16 16:45:20.894463536| +-118|-16813|-361944328|213|-40953.43||-4751644152447.696289|false|undecided|david white|2070-12-23 10:54:21|2048-10-22 12:20:49.521887540|1979-08-27 +17|28606|277582670|2232|45641.30|128509.04|4503808618217.277344|true|education|tom underhill|2080-09-02 15:00:49|2037-02-08 08:15:49.8202484|2080-09-01 +102|-18601|283618733|-8297230235506343936|-1052.55|-2765868.79|4944203659075.371094|true|geology|quinn ovid|2075-12-13 06:47:46|2023-03-25 01:12:26.114510579|2001-02-02 +-110|29515|-395499919|3430|-39432.90|3255575.14|-1079961002014.897461|true|chemistry|wendy zipper|2032-05-15 10:18:11|2062-08-26 11:43:13.962436079|1985-01-02 +|-9375|1107258026|391|39049.41|2111980.57|-1263586613411.327148|true|education|quinn steinbeck||2028-11-03 23:51:03.652949047|2089-06-26 +96|28011|830944953|-7395343938785738752|-17062.37|-4047697.78|2447642338433.482422|false|chemistry|irene davidson|2070-06-10 17:13:05|2049-03-17 04:12:33.703429425|1977-08-25 +74|-14836||9038087402564657152|46621.33|1011931.42|2571176224975.069336|false|american history|oscar nixon|2056-02-11 14:02:01|2061-02-10 07:18:47.459454314|2066-11-29 +-42|-15695|-1052493316|-9013952631912325120|24814.36|225913.19|1857357798507.415039|true|kindergarten|mike van buren|2032-07-31 17:27:25|2020-07-02 01:16:38.343352835|2081-07-07 +-80|-11081|440393309|3446|-27153.14|-3115694.15|3504105610850.736328|false|geology|sarah ovid|2080-11-21 06:41:56|2022-09-23 02:34:19.579830812| +5|-22582|-397683105|-8703026916864802816|-21676.69|2972179.30|-3159033035088.328125|false|||2016-03-08 14:54:40|2039-10-18 00:49:44.471616221|2045-10-19 +92|-27715|589546540|-7833618000492109824|14121.48|-2112484.76|-2346132497060.402344|true|geology|david hernandez|2070-12-19 18:40:20|2030-10-05 20:00:42.51205588|2019-03-26 +60|-23853|-1430903652|1541|-22380.78|2905459.89|-4830217659481.838867|false|xylophone band|bob zipper|2044-11-07 17:03:34|2040-06-07 07:46:11.981745178|2022-05-14 +-2|-373|435426302|8759184090543857664|36669.53|-4226813.46|-2014561647846.981934|false||ulysses hernandez|2019-01-12 08:00:20|2038-11-27 08:44:14.384170737|2015-01-09 +121|22333|658636280|-7042183597114081280||1390895.25|3030971793285.637695|false|wind surfing|alice white|2069-01-21 19:40:51|2030-03-05 11:21:30.696290529|2045-07-03 +25|-10312|1759741857|-7147490721376591872|11100.55|-1549045.77|4731512443034.197266|true|joggying|nick steinbeck|2020-10-20 08:00:11|2061-08-30 15:56:42.855551352|1998-03-27 
+68|25170|1911834442|3725|-46855.76|1255180.75|-1442847310094.883301|false|study skills|priscilla white|2031-05-04 10:11:19|2037-03-01 08:47:26.266061470|1976-03-08 +85|22852|-1138530007|7961909238130270208|-6093.71|3270007.69|-2674772136505.545410|false|education||2030-05-16 10:48:35|2068-01-21 12:11:12.864391307|2077-01-10 +116|7258|-966979668|-8930307926221807616|16695.39|-3896225.81|||education|alice falkner|2026-04-02 23:02:34|2061-07-08 03:11:42.545676274|2004-05-21 +-127|9863|1516149502|2719|-41428.66|4157540.50|638265920153.422852|true|yard duty|david ichabod|2017-04-04 18:50:25|2050-03-30 05:09:44.583713357|1976-02-24 +-68||-1230459100|-6988970700649168896|-32345.84|-4878754.52|2626801691463.143555|false|forestry|mike laertes||2015-12-05 16:38:41.173038216|2046-11-10 +-86|-22421|-345542922|-7155539549555105792|42860.19|-4587464.51|2546447982029.782227|true|biology|tom steinbeck|2076-09-06 07:07:12|2057-12-31 11:27:12.154351101| +123|-1433|-1656822229|3625||1784691.26|-1490403618753.184570|true||priscilla ichabod|2023-04-14 13:50:19|2022-08-17 04:36:05.763580269|2019-11-15 +67|-17254|129675822|8113585123802529792||1292322.18|-2056228636604.906738|false|religion|alice ichabod|2075-02-23 01:44:22|2019-09-04 19:27:52.964797916|2101-04-17 +116|18354|2066707767|9207927479837319168|11869.69|782908.70|-3561486225626.175781|true|kindergarten|zach thompson|2036-01-14 22:33:10|2078-12-19 21:06:17.223542373|1981-03-03 +2|-6487|-2011708220|-8387347109404286976|3033.19|1933471.30|4046390706154.064453|true|biology|katie zipper|2024-01-09 10:21:56|2048-02-11 17:28:42.54177107|2069-08-17 +-53|-16052|-768305191||17946.80|303090.34|-364312091649.842773|false|linguistics|rachel johnson|2027-04-27 06:12:38|2052-06-15 15:08:20.185912632|2073-07-08 +-54|-31404|1540680149|8896237972875370496|2155.10|-4075392.96|4275511770663.023438|true|kindergarten|mike white|2053-02-17 10:06:35|2033-01-10 10:54:27.769712510|2019-02-12 +73|-29468|564349193|8372408423196270592|8882.78|2172977.04|335964103970.418945|true|debate|tom laertes|2070-07-27 23:21:50|2060-05-03 18:17:59.362453382|2082-10-06 +28|-16425|932774185|922|36886.25|4970929.84|-2211537110226.693359|true|yard duty|bob king|2042-10-20 10:28:56|2037-10-28 21:57:54.89341833|2079-05-16 +-108|-5118|1281159709|7255302164215013376|43517.90|-370366.94||false|chemistry|priscilla davidson|2074-11-03 03:30:08|2027-10-18 04:36:01.205338700|1994-01-16 +-31|-16978|318631333|-8585966098173870080|18570.09|898302.80||false|mathematics|ulysses davidson|2062-03-03 06:35:19||2095-12-10 +-111|-20112|1847210729|8424515140664360960|-13301.82|-3521114.58|-1467777131906.373535|true|quiet hour|sarah polk|2035-03-23 23:34:06|2068-12-06 16:35:56.147946401|2036-05-11 +-118|3486|-76654979|-6997233584896229376|-43892.40|1141130.14|3496895382259.453125|true|mathematics|zach van buren|2078-06-17 15:41:41|2017-02-03 01:26:03.765359003|2079-06-11 +-1|18900|-2137168636|8087737899452432384|24766.64|-1466134.14||false|industrial engineering|katie garcia|2072-04-07 08:27:00|2046-08-31 17:44:23.281747989|2038-04-03 +-60|-4501|557053197|1493|-28572.29|-3264979.44||true|topology||2069-10-07 09:02:18|2031-11-14 01:37:26.277625746|2093-10-03 +71|27960|-859535015|8779711700787298304|43414.93|-4284284.11|23723101115.607422||forestry|bob garcia|2065-07-01 07:28:35|2040-10-27 09:40:45.325661811|2078-10-25 +74|-17056|672266669|2533|14993.12|4181055.99|2330393617025.130859|false|values clariffication|jessica underhill|||2066-04-21 
+105|8328|-44559184||34668.32|-3358597.04|2107086403699.476562|false|industrial engineering|priscilla garcia|2073-01-23 05:17:11|2041-01-24 08:27:17.399682734|1982-09-04 +35|13020|-1491722659|8017403886247927808|-27501.34|-3084795.60|1361019533518.934570|true|biology|ethan laertes|2049-12-25 02:26:54|2057-08-09 18:37:09.476222463|2091-03-14 +|-18151|-1140071443|1282|32571.05|-218555.28|2241361269159.725586||kindergarten|mike laertes|2038-07-20 14:27:31|2076-01-14 17:32:40.202975480|2076-03-16 +-55||-1960344717|2177|-44515.88|-75942.87|873619012141.020508|false|opthamology|holly steinbeck|2037-12-11 11:02:50|2068-04-24 14:56:03.866851484|1973-07-26 +-74|-12876||-8632237187473088512|27660.85|-3161619.93|3454616098419.087891|false|kindergarten|nick laertes|2054-07-06 18:44:09|2049-03-20 15:41:29.13229610|2043-01-06 +-121|-11110|2022944702|8109381965028548608|4900.65|-1214641.94|-782319272106.413086|true|biology||2077-04-19 06:19:04|2080-04-29 19:51:25.500331032|2029-03-19 +29|67|590719541|1157|36502.48|-4871524.01|-2077483805740.065918|false|wind surfing||2034-08-25 19:02:03|2037-08-20 21:02:49.427519908|1992-09-10 +-45|-24911|1870464222|7378993334503694336|-16461.48|2813386.84|-2450662360123.097168|false|quiet hour||2024-08-11 09:42:40||2059-08-24 +-72|2074|1013517056||-42455.06|-142043.75|3719431660678.935547|false||xavier ovid|2023-08-30 07:09:18|2018-09-20 23:47:19.977608647|2007-01-02 +-110||-1946023520|2560|-46654.69|407244.54|-622449463768.788086|false|industrial engineering|irene brown|2049-12-25 17:32:04|2044-10-31 04:37:40.524530302|2017-04-21 +-71|2335|1003667927|4037|-40079.96|374952.12|4337472356507.205078|false|topology|mike polk|2018-06-24 18:36:35|2064-11-10 09:43:52.16046799|2028-12-03 +-54|-4364|1775355987|-8562524688907485184|-10552.10|-494146.63|2527866513015.055664|true|nap time|david ovid|2036-03-05 02:43:58|2033-02-15 05:01:02.667963573|2061-02-21 +-12|-24847|1471913583|2325|26530.25|-11708.56|2137890018516.585938|false|zync studies|rachel robinson|2041-10-21 00:45:05|2031-10-12 16:59:24.57008285|2099-02-12 +48|-4923|2051470532|6962726713896484864|-31562.99|-3107233.90|1876626916355.661133|true|american history|rachel zipper|2047-01-05 01:41:41|2067-01-14 02:21:16.602793258|2104-10-13 +-50|-31709|-1379039356|8120593157178228736|20553.47|-2588951.95|3745237629497.139648|true|geology|yuri white|2072-09-13 15:30:55|2040-11-14 23:29:32.319041291| +87|26324|-1709117770|6924820982050758656|10187.76|-787959.05|-455009287847.438477|true|opthamology|calvin miller|2037-08-01 12:37:21|2059-09-14 10:15:36.572649743|2083-11-10 +-32|20746|1592153312|-7366430883634929664|-48828.20|3542628.51|-4174317808621.062012|true|religion|ulysses young|2022-07-28 16:59:23|2055-09-14 11:55:11.844515490|2032-03-31 +|-23284|-2071851852|-7209060152494817280|34473.73|698236.93||false|debate|ethan ichabod||2039-04-13 07:12:39.959754595|2051-04-11 +-79||488559595|-8689606130068611072|46873.75|-4585927.20|1922555768632.854492|true|history|ethan hernandez|2016-07-20 02:40:21|2014-04-06 14:33:25.947936182|2006-02-12 +-124||297577612|3190|26589.02|2183845.19|-1193317944887.937012|true|geology|bob ovid|2014-05-18 11:54:56|2059-07-31 15:56:05.368353709|2063-06-28 +43|1762|-2119539915|3725|-45749.50|3070034.88|-3994813234986.907227|false|industrial engineering|holly carson|2039-11-03 00:15:58||2092-10-07 +-38|-28098|-890374552|-8581765103969312768||-3283040.77|2656201211529.278320|true|industrial engineering|gabriella polk|2029-01-04 19:28:12|2014-02-05 04:57:17.37210387|2038-02-26 
+-67|-9676|-1889139541||-11999.87|-2509288.07|4834823806574.134766|false|linguistics|irene davidson|2039-06-08 03:04:50|2044-07-10 14:18:16.503028090|2094-11-08 +-59|-15053|459269456|3542|24044.42|3949871.39|3217181341598.660156|true|kindergarten|mike nixon|2054-05-19 16:38:12|2053-10-27 15:11:39.341374749|1984-09-17 +45|-9065|566646177|8553195689344991232|-8135.26|-1939444.49|1642913015191.496094|true|forestry|calvin ovid|2027-08-13 20:32:10|2034-04-21 19:49:54.457098155|2062-06-17 +12|-4618|-1098379914|1789|36168.36|2705339.34|-3690347823231.547852|true|biology|holly johnson|2042-06-30 11:41:46|2018-07-12 01:39:37.64101956|2056-03-04 +71|27905|-1755088362|8698055291501543424|-19752.00|33234.12|688042997736.531250|true|american history|oscar ovid||2053-09-30 17:29:18.665697992|2009-03-20 +-71|-18503|597657990|296|-11362.53|-4728535.24|1779931909010.094727|true|religion|david falkner|2072-03-01 20:11:45|2037-09-21 12:33:06.143755779|2000-03-05 +11|10940|1687784247|-9095689235523264512|4559.16|-3019879.00|-874527173245.560547|true|zync studies|bob king|2014-07-11 06:14:44|2077-11-30 06:12:58.616698981|2034-11-19 +-50|5591||7998687089080467456||285146.00|3230421493464.880859|false|quiet hour|priscilla ellison||2042-01-18 02:32:10.14671907|2094-02-22 +-90|19986|-1808960215|8160569434550403072|-36016.61|-2975819.86|4812496108620.009766|false|zync studies|ethan davidson|2040-08-07 19:44:11|2076-10-03 23:03:12.999374869|2062-08-07 +4|-21009|-928013434|489|37854.16||4399713240319.478516|false|values clariffication|ulysses hernandez|2047-03-25 04:38:53|2054-05-31 05:02:46.363507150|2069-11-23 +20|-23241|-1701492480|-9175038118837149696|-26613.47|1842617.05|-874710832768.563965|true|american history|jessica carson|2037-03-24 21:59:48|2075-10-04 10:31:47.885981681|2055-06-11 +119|-6384|1187495452|8571268359622172672|-8449.84|-4234142.32||false|american history|xavier ellison|2058-04-04 16:57:08|2031-10-24 10:16:43.56250862|1981-11-13 +26|23468|-2136052026|-7916510129632296960||-3375.58|-932636901856.333984|false|||2063-08-14 11:27:00|2029-05-07 11:16:44.211702565|2093-04-29 +-43|24298|-1524554771|8323460620425330688|38854.35|-4288521.90|816149960555.847656|true|joggying|luke johnson|2039-12-24 23:32:35|2074-12-19 05:35:31.748853096|1994-05-03 +|-11767|-938756287|346|-14372.50||2123388956108.485352|false|philosophy|fred steinbeck|2023-06-14 17:07:18|2013-08-25 12:57:38.526376049|2068-09-07 +82|-6334|-564495517|3980||-3367097.22|-974002789414.605469|true|american history|sarah steinbeck|2077-09-25 01:00:46|2031-02-06 14:50:01.97188046|1986-09-18 +-34|-13335|1568180994|-7707242953271500800|43312.27|-4190279.40|3157550021812.021484|true|education|alice underhill|2077-03-07 09:32:11|2061-12-11 17:47:50.656826729|2033-07-02 +-41|10278|-922200749|1811|-5675.84|2126493.90||true|linguistics|ulysses polk|2021-09-26 02:58:42|2037-02-07 22:52:41.845445061| +-3|16134|923980398|2803|47958.63|-123069.85|3302684840602.587891|true|xylophone band|katie van buren||2017-11-28 19:30:26.747429591| +-48|-27284|-215703544|7370078518278397952|-44853.80|2971392.13|-2865549427171.991699|true|values clariffication|yuri thompson|2040-10-13 04:09:32|2042-06-15 18:08:24.614905000|1987-10-30 +122|-21805|1543611951|7497276415392407552|-21005.13|2798310.44|-4898025473643.805664|false|american history|tom quirinius|2027-09-08 19:38:10|2073-02-25 17:53:54.931182325|1974-09-07 +29|-4772|-2028355450|2323|-14463.60|-2063292.11|3614336014381.258789|false|quiet hour|quinn thompson|2040-10-17 05:30:18||2073-04-07 
+22|-23622|916057807|8467976965865799680|-45069.06|-3169540.55|-3121144891991.676270|false|american history|katie ellison|2039-06-07 19:13:41|2064-10-13 03:30:27.872003019|2064-06-14 +54|25636|-1156193121|691|14845.29|2712830.87||false|industrial engineering|yuri johnson|2075-04-20 17:39:07|2036-11-16 16:14:15.788195860|2053-03-05 +61||-1462331586|1914|-19390.54|-2888414.09|-1458422440761.921875|false|topology|gabriella xylophone|2043-05-11 03:31:25|2016-03-24 12:00:25.125315901|2067-01-30 +-68|-27049|-941433219|6982145326341423104|-17847.79|2416407.91|3913372951778.839844|true|quiet hour|oscar white|2016-12-13 22:46:01|2042-12-23 09:34:08.712953951|2092-12-18 +-14|-5357|-671853199|-9203804401302323200|-49229.73|3168248.28||false|linguistics|fred falkner|2045-09-30 02:09:20|2069-07-07 15:06:02.724854172|2093-12-04 +-125|-23546|344239980|7823874904139849728|41579.55|1950776.66|1201402518499.143555|false|joggying|david hernandez|2034-11-18 20:10:51|2054-11-24 08:43:56.953788150|2105-09-20 +54|26155|-532755480|7534145866886782976|6967.23|-3226525.02|-1855303775517.675781|true|debate|rachel van buren||2055-01-08 05:36:23.516296700|2012-04-21 +-1|-14551|76381404|9085434340468473856|-35645.13|2319559.14|-308755508523.432617|true|quiet hour|rachel garcia|2017-01-24 05:00:43|2013-12-09 10:54:08.794663094|2052-11-16 +-97|-3619|-1545388906|8579974641030365184|-21168.24|-2764375.21|-3347449615248.976562|false|values clariffication|bob ellison|2055-05-20 19:13:26|2033-04-21 06:38:32.498310577|2017-11-30 +100|-6024|1723691683|8536948829863198720|47627.04|-1569764.42|-2866718883273.335938|false|zync studies||2054-06-22 01:40:38|2058-02-06 15:15:47.940520185|2072-11-26 +126|5516|278643258|341|-11228.83|634413.34||true|history|xavier ichabod|2022-09-19 23:14:38|2079-08-22 14:14:01.57697301|2067-03-01 +78|-10439|742866312|-9102482277760983040|17001.89|3890002.99|2756529687561.446289|true|history|victor zipper|2017-08-01 15:57:45|2070-05-02 05:20:55.632052613|2030-10-04 +-103|32210|-1254129998|658|-29957.29|-2795444.37|75939180315.778320||mathematics|ulysses brown|2027-08-02 14:55:50||2081-08-17 +80|-5089|-588547970||40137.08|4196447.38|2249064278951.813477|false|values clariffication||2044-10-13 20:35:22|2063-07-28 15:07:41.30831543|1990-09-22 +-17|-14705|-887790938|2843|27083.98|-4094095.34|-4538477190343.535156|true|study skills|priscilla ichabod||2060-07-21 05:29:07.891389875|2020-08-08 +41|-22910|1410516523|7584007864107778048|-26850.55|1924125.21|2728934571209.719727|true|education|yuri ovid|2069-09-25 12:15:22|2081-01-21 23:09:27.167408818|2105-06-13 +53|-16247|-186764959|590|-43932.82|1488803.45|426645021297.744141|true|yard duty|xavier steinbeck|2052-08-30 02:52:56|2057-09-22 01:17:56.602173912|1977-06-25 +124|-1067|-2146432765|8899122608190930944|-8534.65||3325424877285.349609|true|religion|nick van buren|2058-01-20 15:06:15|2077-04-04 20:32:46.931020333|2026-01-11 +-36|27156|850625480|3588|42187.91|1811969.63|-3976647851697.982910|false|topology|zach allen|2014-07-18 20:02:05|2039-08-04 00:06:01.390839073|2058-05-02 +104|10457|-1380191654|3609||4276263.85|-265494965739.847656|true|chemistry|wendy steinbeck|2076-10-08 03:34:56|2043-11-26 06:58:33.589491132|2082-02-15 +111|-260|1372224352|3824|-10063.81|3381864.81|4651675165956.277344|false|education|tom underhill|2015-09-12 05:40:29|2024-06-13 07:25:09.53150568|1983-10-31 +-41|276|-2124994385|7690986322714066944|10570.59|1504218.24|4721071679740.945312|false|undecided|ethan young|2057-12-06 16:20:41|2044-12-03 15:12:53.223286147| 
+-98|-4286|1074488452|7765456790394871808|34635.92|2380624.11|1160277570495.260742|false|industrial engineering|tom polk|2046-08-09 21:35:24|2063-02-09 19:10:28.968599555|1985-02-08 +5||-500921094|-8649711322250362880|2739.41|2974096.30|-3103906430076.932617|false|debate|irene van buren|2078-11-04 19:08:18|2030-03-03 09:27:14.362039938|2065-12-20 +-106|11895|-628446314|1948|-16324.00|-4498818.60|1427707877153.423828|false|debate|||1970-01-01 00:00:00.172894766|2080-10-14 +86|-16390|-1918847735|-9101953184875757568|-11869.39|2819946.57|597670762650.892578|true|geology|priscilla miller|2048-11-20 13:09:10|2027-04-22 12:21:15.759028066|2038-12-26 +52|-30492|2031604236|2463|38249.67||2340697225707.053711|false|nap time|quinn johnson|2027-10-15 08:46:59|2066-12-22 01:05:49.826136001|2088-10-12 +-69|-13410|-738157651|1813|20147.37|4298419.86|-471086287570.549805|false|kindergarten|david allen||2078-02-24 23:37:09.324361343|2023-05-09 +50|-12422|-1266138408|7054271419461812224|33076.17|4726793.74|-1825975796516.701172|false|study skills|gabriella steinbeck|2034-09-27 07:53:11|2067-05-04 08:00:04.76094355|2045-08-30 +119|6913|1836499981|7548958830580563968|-789.80|-4853521.51|-2520439759952.566406|false|education|yuri davidson|2037-12-20 12:22:00|2060-03-04 05:27:06.411478902| +-68|22588|2084666529|-9206329156028112896|-46857.55|4518647.92|3514007061626.251953|false|industrial engineering|mike brown|2025-09-06 20:02:42|2017-06-08 12:00:40.56190634|1976-03-22 +30|-26180|1522208504|2637|24228.24|4916304.14|2690767997858.607422|false|study skills|fred hernandez|2018-08-31 02:11:20|2034-06-17 01:40:03.264737765|2005-11-23 +-106|-25689|-693249555|-7661250850555633664|-24825.24|1483670.11|-4990192589949.166992|true|geology|nick king|2019-01-30 15:29:01|2016-03-23 21:48:37.760268674|2081-06-09 +113|24019|899810881|664|44245.25|4097479.70|294259545397.430664|true|biology|quinn garcia|2017-11-03 16:12:54|2065-09-09 02:55:30.992457875|2044-04-01 +-73|-7873||2487|-21944.26|-1353294.67|3144741114101.847656|true|american history|sarah quirinius|2068-12-01 14:02:38|2025-09-28 09:32:04.373329529|1984-03-30 +-29|-4211|-1626062014|8221561626658881536|-17167.97|2376404.62|-2396766266715.104492|true|religion|jessica allen|2076-10-17 13:16:32|2041-07-29 04:57:59.28367872|2052-06-27 +76|-19545|1765874562|8169878743136043008|4737.74|2522751.77|4492331567972.839844|true|undecided|mike ovid|2025-02-11 02:25:59|2057-07-18 07:47:20.80373888|2035-05-28 +120|15578|1668094749|6927260280037097472|-12442.12|-3625208.20|-2505949310240.964844|true|values clariffication|luke carson|2060-08-28 06:39:49|2042-08-21 19:21:53.22689204|2036-04-04 +-121|-32403|-884796655|342|43804.75||3086918973716.556641|true|philosophy|oscar robinson|2042-10-22 23:52:01|2027-05-27 10:44:21.656527878|2057-06-19 +-2|-27807|1809795770|-7501803640821456896|-31728.95|2217700.27|1303564912300.209961|true|linguistics|david ichabod|2058-03-12 04:48:33|2026-10-07 09:37:11.609376049|2096-11-02 +39|-2410|1134416796|2745|32844.17|-1496658.25|-634697885935.402344|false|opthamology|quinn thompson||2043-04-08 21:14:44.886763154|1974-03-31 +57|-3449|-1038565721|677|-32939.72|3205392.52|-3723568349288.726074|true|yard duty|wendy laertes|2039-08-20 17:31:07|2043-12-22 07:02:29.625016869|2068-08-18 +-52|-19028|-2081809883|8435912708683087872|-49142.74|-1903864.82|3512222649259.086914|false|mathematics|gabriella brown|2040-06-21 05:21:16|2058-07-03 23:16:43.395150800|2059-05-01 +-123|12674|1817671655|7412924364686458880|22076.92|1732310.78|-198468750423.259766|true|values 
clariffication|yuri van buren|2062-02-20 04:56:42|2074-11-24 19:41:00.713104566|2003-07-31 +-76|-7533|1332181668|3563|16233.53||3796582343071.613281|false|yard duty|ulysses laertes|2029-10-24 07:37:56|2071-10-19 15:34:38.872812372|2071-04-18 +-119|-14053|8040290||9147.84|-4731232.91|1812411477716.757812|false|values clariffication|holly ichabod|2031-01-05 20:55:56|2050-12-24 02:26:09.954349620|2097-09-19 +110|-17426||7153922334283776000|10695.41|-4157898.82|-3205627224037.357422|true|yard duty|calvin ichabod|2022-06-25 07:08:22|2061-01-13 03:07:32.453720097|2066-09-03 +-28||718692886|8849475396952514560|-22341.30|-1839215.40|-2738602396779.903320|false|opthamology|victor white|2030-06-25 00:58:04|2061-03-21 18:52:13.283910292|2095-01-04 +90|-16129|-2007662579|2977|15304.02||-4049128118326.663574|false||wendy quirinius|2080-06-24 16:38:20|2017-06-18 02:28:45.172735440|2050-09-13 +96|1409|-1259611508|-7910019233726242816|-38530.51|-44517.83|-3803225081574.588379|true|industrial engineering|calvin brown|2017-03-12 03:01:53|2033-08-29 02:29:46.117205169|2010-04-10 +117|-32688|144428297|2835|-20190.35|3425255.76|1239390523307.939453|false|opthamology|rachel falkner|2056-11-05 14:04:47|2075-05-31 12:50:08.854908788|2084-04-26 +55|999|1281277970|2335|-2914.82|||false|opthamology|fred young|2072-08-24 05:04:35|2058-05-10 01:43:32.344081382|2029-07-31 +41|28165|1069486136||-37952.82|-276339.96|3109359968514.944336|false|opthamology|victor ichabod|2069-02-08 14:25:38|2076-05-15 06:44:30.554693842|2053-03-25 +-86|23850|-601946913|2515|10326.69|4649531.05|2268855975314.246094|false|topology|ulysses robinson|2013-04-25 03:23:27|2056-06-03 12:33:31.602556689|1984-05-06 +-101|718|386741352|-7617860842651017216|-42832.01|2064169.43|-4943806409743.167969|false|industrial engineering|sarah ichabod|2057-03-27 09:22:04|2064-01-08 17:40:10.787568720|2020-01-01 +-127|-10662|648935848|-7637755520917741568|-33941.20|1359686.42|1579030228803.083984|true|chemistry|luke laertes|2016-12-23 11:45:20|2036-05-11 12:47:07.733851638|2003-04-13 +80|-1223|92834720|2647|-36870.03|1719246.27|2540178498144.847656|true|nap time|bob falkner|2038-06-08 07:54:27|2042-11-05 13:52:50.588354549| +-3|22320|1343581455|707|15471.72|-4962575.43|-1611227671309.833984|false|study skills|mike robinson|2078-04-16 04:19:35||2048-11-11 +||-4943292|8856674723376668672|2164.62|893829.85|-631328089657.294922|false|opthamology|oscar quirinius|2027-03-09 19:18:57|2054-01-23 04:24:15.292126981|2060-03-20 +-50|2616|977624089|7857878068300898304|-30457.66|-1477221.11|859058050811.647461|true|american history|ulysses ellison|2035-05-02 05:58:22|2050-12-28 10:26:48.409801540| +3|-17916|-191704948|-8887058200926093312|35825.27|4647161.69|2477546519048.629883|true|mathematics|wendy carson|2027-09-25 05:34:03|2032-03-05 14:53:42.83179281|2024-12-19 +100|-5919|-835107230|108|-37662.39|-3446860.49|4914447452945.439453|false|kindergarten|sarah carson|2031-07-27 00:13:59|2066-12-30 00:14:45.43797107|2058-07-30 +37|-24696|314232856|2762|-9928.97|-2740875.06|3528496539480.343750|false|geology|fred thompson|2032-05-15 14:11:38|2057-07-08 21:29:58.104912394|2091-09-09 +27|-8398|-1583445177|3622|32876.52|285737.42|-4676053735656.750977|true|undecided|luke xylophone|2071-11-07 15:31:50|2021-08-10 08:22:04.527867145|2086-02-10 +|-14644|-2133145181|868|-5084.93|-77615.10|4647718670978.201172|true|chemistry|yuri thompson|2030-06-15 16:57:13|2063-01-20 09:04:49.362142228| +36|-7046|843282593|138|-1437.75|1530832.04||true|biology|zach van buren|2049-05-13 
12:51:19||2039-05-11 +31|-10258|-1009249550|1786|-45853.99|-2996009.61|-1270664267650.971680|true|zync studies|calvin laertes|2054-10-23 17:10:03|2054-10-22 11:44:42.793294685|2064-05-02 +||-236700442|9116137265342169088|13586.84|-3397114.60|1665543214302.829102|true|biology|holly carson|2076-12-15 12:30:43|2067-07-10 06:37:03.526969971|2038-06-13 +-8|-25988|1447438548|7955126053367119872|-37404.36|-4278525.05|-1539681145987.756836|true|forestry|david thompson|2056-07-24 05:41:13|2038-11-27 05:16:00.826863713|1976-09-08 +-93|4747|-201554470|491||-4736059.79|-4074148068973.322266|false|religion|luke brown|2052-04-12 11:55:51||2019-08-20 +18|-26064|-807242371|-7778829032042790912|-497.18|3018359.10|4165268868128.960938|false|history|quinn xylophone|2064-08-04 14:39:21|2049-07-11 03:38:36.188505486|1979-06-14 +-2|28921|-1599905147|-7753051494275432448|38746.13|1924853.86|2728705276812.348633|false|chemistry|mike miller|2063-11-22 05:40:00|2040-04-01 02:20:42.508563674|1999-07-08 +|-26946|-329695030|8962097525980225536|47503.36|-817384.58|955232900660.511719|true|zync studies|holly van buren|2020-01-09 00:26:09|2033-08-15 04:18:59.595230815|2066-06-06 +0|-2835|1968813171|8163948965373386752|8908.65|-3820148.18|-481479853467.270508|false||sarah miller||2068-01-02 01:54:57.346312106|2075-09-25 +114|20265|669484010|1145|13147.10|-1843756.18|554432518470.839844|false||yuri ovid|2039-05-04 13:47:10|2038-08-21 17:57:56.403669763|2081-07-12 +|28775|232405034|-8438554249514491904|46640.35|-4159360.82|-255361794196.424805||education|quinn robinson|2026-01-11 21:41:33|2040-12-26 20:16:11.824788390|2024-05-19 +111|22074|-853606287|522|35504.72|1790821.65|38856848277.219727|true|american history|alice van buren|2054-05-22 23:30:43|2046-11-03 15:13:41.781244421|2079-09-30 +55|4319|-524189419|1785|34553.22|-1096127.27|4436479409217.000000|false|biology|xavier van buren|2079-08-25 13:19:06|2080-03-03 09:31:40.884489078|1979-09-24 +51|-800|564366133|1545|-31100.82|-1936194.76|1798080391637.881836|false|undecided|alice xylophone|2065-08-30 18:15:20|2055-12-07 19:37:09.85869965|2078-10-08 +107|11159|-346607939|999|-24238.72|794247.13|835410305018.271484|true|forestry||2072-11-24 12:45:43|2027-01-26 04:52:45.940702976|2001-01-27 +-116|-31182|-54793232|1941|-33327.99|-4081209.43|-4789172370818.662109|true|wind surfing|jessica van buren|2062-10-28 14:12:34|2047-11-19 19:37:15.731253997|1983-02-23 +-10|-32371|1333214263||-32279.82|4486493.62|4879631964298.525391|true|undecided|quinn falkner|2045-05-25 12:09:59|2068-07-04 02:37:10.599103732|2089-02-09 +80|21377|-267554590|7454442625055145984|8701.65|-905741.94|1537464945669.313477|false|joggying|david van buren|2077-05-12 07:35:39||2061-02-10 +-104|-3190|197056787|3510|-22360.28|4523037.34|3240433039663.095703|false|xylophone band|xavier white|2051-07-06 22:05:57|2051-01-02 05:43:39.718899624|1996-03-18 +-64|6163|-1602792666|2373|16736.31|-3740028.62|3690526967552.294922|true|joggying|yuri brown|2040-02-10 20:14:48|2080-02-11 07:48:36.902436157|2000-11-17 +-30|-24670|1701817607|-8127494999848919040|-13803.97|3963022.39|1474152640583.561523|true|geology|tom hernandez|2079-07-30 04:14:31|2057-05-04 01:14:00.609761058|1974-09-16 +-74|-23109|1346627771|1643|-43168.85|3155689.66|-4448781974974.628906|true|forestry||2039-08-04 16:00:45|2028-08-01 19:19:13.322219484| +118|-23389|-370093295|-7819437864839495680|-26609.73|-4237190.22||false|kindergarten|bob thompson|2019-09-22 15:35:21|2037-04-28 10:43:14.893096143|2021-09-12 
+69|24253|60847311|-7822452149325094912|-40655.11|-3881485.82|-1314247053468.484863|false|debate|oscar young|2067-05-18 04:05:27|2039-10-06 08:12:08.884589651|1984-06-30 +-5|-32000|-177025818|7411793502161182720||2485530.29|1594683877449.583008|true|opthamology|yuri young|2035-11-20 23:20:18|2078-01-19 12:17:21.914800889|2015-02-06 +-35|7474|-1676261015|2274|38942.74|-37023.70|3451540509748.127930|true|opthamology|katie polk|2043-05-05 01:41:36|2044-09-13 08:37:07.823303166|1973-01-15 +-102||-714270951|8783241818558193664|-24442.85|50032.07|3379969757973.632812|false|topology|calvin xylophone|2049-08-05 08:30:47|2065-10-10 06:49:24.92342328|2078-07-28 +84|-18485|345556325|8316336224427483136||4778.81|-213703503553.759766|true|geology|irene laertes|2027-03-23 13:59:49|2068-08-29 13:45:16.22118413|2004-10-29 +57|-19535|-1484033125|-7669169138124275712|5132.56|-3812127.71|-2880380214433.984375|false|values clariffication|xavier ellison|2013-05-28 14:20:54||2032-11-10 +19|-16815|1440427914|2984|-33347.21|-2217545.87|-1622352479297.113281||xylophone band|katie garcia|2037-08-01 18:21:41|2074-09-05 16:07:02.893370421|2050-05-01 +-42||-1201785350|-7772064021830574080|-19385.56|-2417269.98|4254639589730.294922|true|wind surfing|quinn thompson|2056-10-03 11:35:20|2050-10-14 15:57:47.94057206|1994-04-07 +-26|-20787|-2066134281|3397|38532.91|-2052731.66|-2212037711168.371094|false|xylophone band|alice zipper|2081-02-22 01:21:53|2066-09-15 08:59:43.712981923|2068-06-30 +-27|-20093|-226635871||39730.55|2920764.76|3736425396480.705078||american history|alice davidson|2063-06-30 00:25:29|2039-10-04 08:34:32.485344607| +-52|17720|2134433675|8523972434954510336|-41237.62|713755.68|-2484484479175.712402|true|education|irene ovid|2035-11-11 02:58:59|2036-10-18 01:59:34.57776882|2071-03-20 +-28|28089|260463232|-7127548949860818944|44331.03|-3844217.64|-1326537974723.827148||undecided|ethan davidson|2041-01-01 12:36:24|2023-11-30 11:12:45.269637203|2029-01-15 +3|6587|-916495008|8286706213485297664||-454411.58||true|xylophone band|priscilla polk|2066-01-13 23:59:59|2028-04-05 20:31:53.571987516|2031-08-14 +-96|-7065|1102069050|3147|-5252.56|2525210.28|-2140534616823.686035|false|wind surfing|oscar polk|2078-12-19 18:51:30|2024-01-19 18:54:53.308875328|2070-05-01 +-87|-134|-1289501869|-7536330682873937920|-32963.40|4026456.79|-4233812360710.160645|false|american history|irene king|2076-12-17 13:17:28|2026-03-07 02:44:03.954816308|2039-03-16 +-88|-29977||-7115054815375073280|-49346.30|-4958976.45|-2795424166054.196777|false|mathematics|zach steinbeck|2033-10-10 03:50:46|2072-01-30 01:12:19.980530173|1988-11-14 +-54|11077|-235238928|-7319315187617587200|-21327.60|3102506.04|3723093064701.678711|false|biology|quinn hernandez|2050-07-10 07:39:25|2067-01-01 12:42:39.897268043|1977-03-31 +-127|14408|1390704286|1099|-26406.94|4860414.94|4855359545894.408203|false|quiet hour|mike miller|2057-01-23 19:19:39|2064-10-13 08:42:40.250692511|2030-08-25 +-110||1310360849|-8989473881707921408|29776.70|4921339.21|-1711490983970.094238|false|american history|mike van buren|2017-06-03 10:24:21|2017-06-10 20:23:52.816880923|2042-06-26 +-108|6226|-912429611|2816|30162.78|682391.80|618292737301.046875|false|debate|sarah steinbeck|2059-03-16 19:54:08|2015-05-02 04:32:12.949465541|2008-06-28 +60|18845|-1369253050|-6986178228432322560|-35269.73|2961506.86|1006481369666.687500|false||katie white|2022-02-26 02:53:55|2031-08-28 00:04:15.166189356|2054-08-15 
+46||-938762477|-7759425383684849664|46392.36|1976120.50|4269437008221.785156|false|mathematics|ethan steinbeck|2061-01-29 15:15:46|2064-07-10 10:33:03.635139975|1978-09-04 +-71||268888160|-7893577088764174336|-35474.68|-4088895.16|-617279427364.288086|false|nap time|bob hernandez||2040-02-11 11:11:34.256681234|2061-05-04 +91|22232||8091421389575282688|9502.48|-3837325.10|-1866384122906.517090|false|zync studies|holly young|2043-03-07 03:52:18|2033-07-28 01:11:14.500349492|2010-08-03 +77|-6884|-624029057|-7409653086454030336|39017.19|-2245679.39|-997439424357.757812|false|debate|irene robinson|2077-03-18 03:11:00|2040-04-20 17:23:11.66448920|2025-08-31 +-101|12411|-1940205653|7348598907182800896|43885.62|-4414072.29|-1555511953931.052246|false|american history|david xylophone|2029-10-21 01:48:10|2043-08-31 14:12:30.675070864|2085-10-24 +-57|11804|-496915240|-7362189611124563968|-8203.40|3975044.25|1092860346014.425781|false|chemistry|oscar allen|2042-05-21 21:47:45|2031-01-19 14:17:45.573707182|1978-09-30 +59|27945|474795096|||-1041376.93|1049208781647.408203|true|zync studies|mike young|2042-08-04 13:26:56|2076-09-18 22:30:21.635931282|2100-08-21 +|6490|-1819075185|2465|271.01|-3315662.78|178512873486.944336|true|philosophy|luke miller|2058-11-13 12:26:19|2055-04-29 01:32:52.338899070|2072-05-20 +59|-13144|330302407|350|4198.95|-3293708.78|-837074147937.393555|false||calvin davidson|2048-05-03 07:09:22|2016-02-14 09:35:08.980648052|2050-12-28 +-58|-8895|1895751360|2619|44281.94|-3137628.74||false|topology|ethan underhill|2067-05-09 03:59:27|2070-01-06 00:22:49.266686299|1988-03-03 +41|-5283|-1322736153|3722|33698.34|4876900.77|2424764462849.715820|true|wind surfing|jessica polk|2016-04-22 18:45:33|2078-05-27 02:08:56.801166778|1997-08-03 +56|-22608|338805871|898|4188.81|4744554.21|730519058324.112305||mathematics|priscilla falkner|2069-12-21 21:41:25||2070-01-08 +-56|10702|-1552053883|782|-38874.21|-1017367.57|4561020549763.021484|true|religion|calvin xylophone|2071-01-01 17:03:06|2016-11-14 05:12:35.708167457|2080-10-11 +17|-9585|-71433796|1780||509725.97|-1030876895734.564941|false|religion|mike johnson||2077-05-06 09:41:04.29662719|1978-11-15 +110|-24095|-1655396452|2186|-45152.50|3996472.52|888655102684.872070|false|quiet hour|mike hernandez|2065-12-16 23:03:21|2065-07-26 03:28:40.392848789|1991-03-29 +52|21673|-1066775085|-6921654334727036928|-9981.07|-1650150.10|-4371310034780.968750|false|history|katie zipper|2031-09-27 21:23:48|2080-08-25 23:49:27.604332416|2069-04-08 +-113|20052|-1447140800|4020|-22503.13||-4288708971525.778809||wind surfing|mike underhill|2079-05-03 00:37:57|2078-03-29 14:33:49.853013719|2043-01-06 +-61|28000|-178568841|8325227661920133120|-22983.25|1809174.68|-2297090489976.945312|false|history|nick quirinius|2069-09-27 08:08:25|2043-04-20 02:53:33.962471217|2044-06-19 +102|-31458|-1933192293|-7036607470351654912|-21380.15|1896367.69|1605659965343.057617|true||rachel white|2015-06-12 13:38:42|2068-03-09 14:21:05.750561437|2099-06-14 +-8|-25417|1961954939|7304839835188609024|-41942.73|191389.71|-4592978910553.853516|false|undecided|nick ovid|2035-11-09 10:25:30|2077-04-15 14:29:55.59700199|2010-11-12 +41|-10569|-745678338|8792059919353348096|-14404.47|-906177.24|326664391884.108398|false|philosophy|priscilla steinbeck|2034-04-20 09:29:51|2042-10-01 01:12:43.143080614|2048-10-17 +-41|4233|618321042|-8856821118526734336|-27396.30|4504910.53|180650253126.657227||debate|tom xylophone|2027-02-11 05:29:58|2066-07-09 11:19:47.657330816|1995-12-16 
+-93|18820|825677248|8720504651219001344|9437.05|-4519097.40|-506526954412.187500|true|philosophy|alice miller||2077-05-05 12:20:54.799217277|2001-02-16 +33|-3103|371383749|1055|-43829.05|-3580973.49|3029406193241.257812|true|wind surfing|jessica quirinius|2026-05-11 10:04:20|2074-01-01 07:56:24.948489929|2096-02-29 +-95|26529|-223311809|1368|-19919.52|2770068.53|-2537836240149.431641|false|debate|gabriella miller|2040-03-14 14:51:41|2037-09-24 04:28:20.146749934|2102-01-22 +79|-28084|-1974972123|8736061027343859712|-28922.94|-2498365.96|-985441433336.760254|false||rachel laertes|2032-06-23 14:29:26|2023-01-25 12:10:28.866537625|2105-06-23 +70|6781|2125311222|7919597361814577152|47154.09|-1192322.16|-850435843660.583984|false|religion|alice polk||2065-10-31 11:02:12.417523639| +-1|22463|867587289|7381659098423926784|-29302.26|-2211405.89|1156004227362.394531|false|study skills|alice white|2038-06-11 11:52:35|2026-02-06 12:13:27.232943870|2090-01-01 +15|7392|869288953|8731960288562044928|41942.98|3624760.68||false|zync studies|alice miller|2058-09-06 21:08:50|2060-02-22 03:34:09.543087589|2076-07-25 +-57|-24942|824743780|-7594824008626372608|35489.13|1006528.68|-4120539026250.966797|false|geology|priscilla falkner|2066-03-31 07:55:39|2053-11-01 10:52:13.944770605|2080-12-02 +-26|-14810|-1407817977|-9178166810751909888|37075.67|-4876175.79|2103363485344.971680||chemistry|tom quirinius|2026-05-24 15:05:33|2014-12-24 04:57:37.782711517|2000-06-22 +-70|6484|-385247581|3083||-2073947.26|890806455827.118164|true|values clariffication|ethan steinbeck|2018-06-04 10:12:46||2055-12-17 +79|-15946|-1078397698|-8948335470186373120|-22913.97|227203.97|4075094455625.994141|true|industrial engineering|david robinson|2033-07-28 05:43:26|2020-05-29 18:18:23.223813235|1982-05-04 +-75|-17873|-837503491|2569|6361.99|1161789.65|-1726978053243.932617|true|values clariffication|quinn robinson|2045-04-18 18:19:14|2021-01-27 02:30:40.164402829|2017-01-10 +96||1660088606|823|-28084.37|-3275847.50|2762686693256.810547|false|american history|ulysses nixon|2070-08-11 19:10:10|2016-09-07 01:20:11.409060890| +-113|-30515|-922875124|259|4946.14|284363.03|-4858481689197.126953|false|kindergarten|fred nixon|2023-06-28 22:37:13|2049-06-26 22:24:15.711126823|2069-02-06 +49|3186|1636364987|8461498293348065280|-25371.38|-1184912.70|-993292644578.538574||education|alice white|2058-11-21 02:58:56|2065-03-02 04:53:08.176223587|1994-05-02 +63|150|1783034168|-8961059046745669632|||-1661232720992.700195|true|topology|quinn young|2038-09-04 07:44:08|2047-01-27 11:44:14.355445111|2043-02-21 +74|-21648|-1111937842|-8607195685207408640|-38757.30|4280929.21|-3715598460104.345703|true|joggying|irene white|2017-10-30 12:48:38|2013-03-25 15:32:17.128329365|2017-10-02 +79|3251|-125419186|-8754966081778565120|32512.77|1838279.26|-1177230386696.095215|true||zach laertes|2059-05-28 03:36:52|2049-09-18 13:52:36.954456644|2013-05-30 +96|-10126|-41864614|-8418913260807217152|-41581.70|3132782.56|-4989436752632.237305|true|opthamology|victor king|2025-08-12 00:24:07|2025-04-07 14:05:54.461436636|1987-03-18 +|-6474|706823078|-8877053610728161280|-32523.08|2431805.10|959431399353.079102|true|joggying|calvin ovid|2024-11-30 09:52:29|2026-10-20 13:44:56.808161722|2038-08-30 +-85|24858|-1062159435|-6935548339131138048|38973.86|-347553.95|3408594399945.599609|false|biology|oscar zipper|2026-05-18 20:07:36|2018-09-17 07:34:48.856373956|2044-03-13 +-44|30883|1452244326|-8219876839318716416|24860.92|-3405708.68|-3460325348420.880859|true|education|wendy 
quirinius|2038-08-09 15:24:29|2054-12-15 15:20:18.843218245|2061-01-18 +88|-9076|239078089|1132|28538.60|-735298.99|-4690879292129.750000|true|wind surfing|zach quirinius|2071-11-14 00:07:27|2047-06-15 18:39:40.150298534|2079-04-15 +48|23320|-1948257321|1337|-43790.15|3928891.37|3704624417356.896484|false|joggying|tom hernandez|2053-10-13 18:39:56|2026-03-31 20:18:01.770207236|2034-08-29 +-98|7197|492120544|1341|-2808.88|-686072.73|-803606583331.628418|true|education|tom xylophone|2015-05-08 21:19:10|2077-10-24 08:05:04.192595595| +72|7058|-1563676282|976|-3004.90|1860479.52|-706916831724.306641|true|joggying|victor polk|||2058-01-01 +38|-14661|195281533|-7557017910095650816|-9383.79|-2348124.75|4636426753661.630859|true|mathematics|tom thompson|2067-05-15 15:21:33|2068-10-26 13:18:32.366613022|1975-07-07 +16|-28061|-292588406||23350.65|4918613.06|-3364353490553.036133|true|nap time|calvin steinbeck|2054-01-11 20:28:18|2037-02-15 00:30:35.900105726|1970-08-29 +-104|19636|-1344287228|-8683802826440105984|8738.40|-1978806.64|4981312863247.886719|false|biology|holly davidson|2017-11-02 22:33:43|2061-05-26 11:57:49.809511168|1993-04-14 +-59|-2113|-909024258|1845|41057.97|213462.82|-323293361486.882812|false|xylophone band|ethan carson|2049-05-23 19:46:12|2079-02-24 16:42:08.448187732| +70|-13309|1173098061|1965|1703.69|2429715.56|-2779513442254.710938|false|joggying|calvin brown|2079-05-18 11:45:16|2018-04-22 08:38:53.938611457|2020-02-01 +19|-17269|-1091003492|-8104684579106914304|-1914.23|281948.98|2761939531515.835938|true|kindergarten|holly robinson|2044-05-20 18:29:37|2037-05-21 10:21:17.589154522|2003-03-13 +-96||1768399622|1835|5444.81||-137609076468.194336|false|xylophone band|ulysses king|2031-03-19 20:16:45|2034-05-25 03:38:47.816839184|2064-05-20 +-28|10916|849859032|7345991518378442752|-3117.17|1217133.63|-3849320444967.645508|true|linguistics|irene quirinius|2060-08-22 17:37:02|2013-09-04 14:35:51.471949124|2092-02-05 +10|19458|900992177|3212|36450.28|1766157.28|-1882794242083.676270|true|yard duty|fred king|2042-03-05 10:51:23|2075-06-04 16:36:19.586166561|2094-02-10 +52|-18283|-1969751342|-7081500255163727872|36032.40|4272798.19|-3071209073548.714844|false|opthamology|victor ichabod|2034-03-30 05:28:25|2026-11-18 10:38:29.612055317|2045-12-20 +125|16516|161210995|1074|-24350.82|2460973.22|921740273811.693359|true|linguistics|priscilla thompson|2024-07-31 16:19:23|2052-06-19 19:48:51.128000435|2031-11-09 +62|30936|1321678350|8372588378498777088|1059.15|-3300550.22|3839852559580.664062||study skills|ulysses ovid|2049-11-14 21:22:02|2034-12-30 19:24:13.874541216|2037-11-28 +-56|-23387|-1811563127|-7593363318079610880|-17859.26|-4870696.94|-1931143655238.862793|false|philosophy|jessica hernandez|2017-09-26 18:25:13|2023-12-17 02:12:59.770265368|2026-03-19 +97|10217|1338047392|-7451660755269853184||587440.58|3232315873104.952148|true|undecided|victor ovid|2041-06-18 17:20:25|2029-12-16 14:12:32.298550349|1987-03-13 +50|-12085|-1954890941|1983|-23284.45|129671.37|-2210750828357.674805|false|values clariffication|gabriella van buren|2055-07-03 23:20:38|2018-04-10 00:19:11.305802202|2103-07-27 +52|-13805|415234946|8514851182589771776|-39177.49|3119458.07|-4784612318346.788086|false|forestry|bob brown|2019-06-19 22:27:39|2065-11-12 11:35:37.144557823|2041-11-16 +104|||1864|13318.31|-3863136.50|-4419006380365.076172|false|joggying|luke steinbeck|2057-07-21 14:03:34|2026-03-06 18:45:53.361673899|2031-03-23 
+-81|25986|-1643714866|8463868417649524736|1656.77|-1600629.58|-4804769067544.612305|true|religion|luke ovid|2019-02-21 09:30:51|2051-08-22 00:41:24.173555438|2041-04-15 +114|-28840|1335803002|3094|-22269.41|1188501.65|-2026377024912.235840|true|mathematics|david young|2054-03-31 08:13:24|2037-11-06 13:20:44.525490319|2093-08-21 +28|27340|-1117019030|-8858063395050110976|17746.57|-370973.48|3665311301420.927734|true|joggying|calvin xylophone|2016-06-21 09:03:55|2038-11-18 02:43:13.946089513|1996-04-09 +87|6875|-980869630|1981|-8269.53|-680305.04|-2046202768950.067383|false|forestry|oscar falkner|2054-03-28 23:22:20|2057-06-06 19:13:12.719272020|1984-12-16 +-38|19894||-8140349174954893312|42897.60|-895172.87|-2612690969145.852051|false|religion|irene ichabod|2029-06-18 22:32:24|2069-05-05 07:02:12.743691084|1993-05-09 +-94|9948|-455114104|-7041362811802148864|35927.66|2125413.96|928631125022.398438|false|american history|sarah zipper|2075-08-11 09:15:18|2025-01-16 11:03:58.820006581|2047-04-09 +90||1709983738|8972161729142095872|27727.80|3584555.68|2487331275679.380859|true|kindergarten|wendy zipper|2075-07-22 03:37:27|2070-11-18 17:07:35.641105448|2005-06-22 +-55|-4877||7989119273552158720|49881.20|-729941.08|2604782698166.197266|true|education|mike van buren||2059-09-21 17:36:20.371130502|2082-11-28 +-42|-14423|524808|2469|48110.32|544211.41|-1335268776571.894043||biology|calvin van buren|2065-03-12 01:32:10|2054-04-18 22:43:02.130196097|2042-01-13 +-22|-15276|819875108|1481|18012.46|-1803815.34|1672426848879.915039|false|opthamology|irene underhill|2046-07-20 14:26:45|2015-11-05 19:57:02.993769202|2040-11-12 +48|-921|2018442973|-8566856504746352640|-15509.73|3968585.18|-4738889659221.818359|false||tom falkner|2039-03-13 04:17:53|2045-10-21 23:06:17.401033240|2022-09-20 +-118|22006|3999930|8272001752345690112|1419.23|-1029930.54|2861569157429.500000|true|forestry|fred thompson|2054-05-09 19:08:51|2068-03-21 22:59:16.347228701|2104-05-09 +-42|-24805|-632803945|-7094827141662539776|44835.25|3437763.92|2825770424376.301758|true|kindergarten|xavier white|2042-03-19 07:36:49|2036-05-11 16:15:05.180723604|2068-05-04 +-7|28940|-180280420|8396433451610652672|-29405.71|-1689827.09|249301764043.821289|true|mathematics|bob laertes|2062-11-29 07:35:17|2070-10-14 23:45:26.608542925|2070-10-06 +-67|9943|-306214368|-7679894005808693248|-24100.54|4314411.12|-768850812250.221191|true|history|mike steinbeck|2068-11-29 19:57:50|2019-01-09 18:20:03.638956806|2101-06-04 +98|-21357|-1109134719|8613562211893919744|18869.43|-649318.26|-4480567003096.353516|true|philosophy|katie hernandez|2058-03-06 17:09:43|2072-11-27 15:20:11.984907349|2065-01-16 +-105|-20835||3407|30691.28|-1609324.97|-1997137164045.063965|false|history|katie ellison|2045-12-30 05:36:07|2070-06-27 19:59:42.330026099|2072-06-18 +-77|14144|-897622427|7686992843032010752|31772.98|-4662600.66|4055930472890.203125|true|linguistics|tom van buren|2044-03-25 19:36:16|2014-02-02 02:41:41.974712738|2034-12-19 +-124|-30109|-249150336|1048|-49638.39|2822393.96|-3764856023597.680176|true|wind surfing|mike allen|2069-02-23 02:01:18|2023-12-21 20:05:42.508066288|1987-06-11 +-126|-21180|2032271149|3507|9204.15|4100590.22|3343985992703.899414|true|study skills|mike falkner|2032-11-01 08:01:25|2015-05-19 00:53:07.99721897|2034-12-26 +120|-11083|-958165276|7784169796350730240|-32134.41|-1761257.85|1246740864978.441406|true|forestry|calvin johnson|2045-07-19 17:27:46|2060-02-07 10:38:05.44019465|2068-06-26 
+72|24446|-1312782341|8551446856960942080|41033.78|-4517017.97|3110337503689.812500|false|american history|ethan robinson|2036-09-10 06:35:49|2017-09-09 22:51:48.511198436|2078-04-27 +11|-4720|-1134786190|3467|28036.67|-3143597.92|-503265260653.940430|false|forestry|katie allen|2034-02-04 09:52:49|2077-04-15 04:42:04.380805925|2068-01-04 +|7197|-1001529082|1458|-971.42|2454201.35|-2783180271476.268555|true|quiet hour|wendy nixon|2074-12-13 00:53:35|2025-04-24 14:10:28.455511332|2029-05-09 +121|26503|-1081328752|213|38702.92||-4432628537861.625000|true|geology|luke johnson|2067-09-14 23:24:55|2066-09-25 14:08:46.769961227|1979-01-10 +65|6240|115111911|735|33453.37|3643181.05|-3331678542651.878906|true|yard duty|priscilla allen||2071-02-20 16:23:36.179620819|1976-02-20 +14|18823|1918230406|9190466190353661952|3909.53|4714913.17|-1411399885652.357910|true|undecided|||2076-02-09 18:31:10.234467554| +-35|5266|-588160623|-8280276629934981120|-45823.61|-3282253.15|-913286552286.223145||geology|david van buren||2069-08-05 13:35:53.308725178|2070-02-06 +72|17394|1467284000|-7895991410072928256|-44874.00|1596795.95|2131825157208.311523|false|history|victor nixon|2026-11-11 17:07:09|2038-03-25 15:28:30.820737074|2021-11-16 +-1||-202409329|-9145593811310010368|44034.44|3326020.44|-1461861566954.816406|true|history|bob zipper|2034-06-12 05:53:28|2052-01-30 16:51:36.858272776|1974-12-30 +-57|896|-251576563|8059284960252731392|36102.20|-4122760.71|844981812716.021484|true|yard duty|luke laertes|2039-12-08 11:56:53|2074-05-17 16:44:26.822467862|2094-03-28 +-112|22505|-1324624386|367|-774.06|-2188747.41|-4462901033248.450195|false|undecided|oscar young|2019-07-26 08:57:51|2017-09-04 17:01:10.939761678|2007-02-25 +-113|17129|735600165|7614435638888210432|32432.05|-3424959.17|-4912660617940.467773|false|linguistics|katie thompson|2072-07-09 05:03:16|2027-12-17 02:48:27.457330747|2066-05-07 +-94|-30397|1336842978|9174894805640142848|49220.52|923281.97|3525110451298.835938|true|religion|oscar steinbeck|2053-11-26 21:39:37|2036-05-19 14:03:24.54912944|1995-06-02 +|9127||-8941201923743703040|-49633.13|-3573744.43|4866596139192.544922|true|american history|wendy johnson|2023-05-04 00:46:03|2059-11-09 16:09:16.564739548|2060-09-15 +-33|-30100|2144365072|1075|5131.58|4923477.10||||bob nixon|2024-12-17 00:58:20|2039-11-08 08:00:26.73617104|1984-12-19 +-98|20179||7492436934952574976|-38464.02|-4213531.63|-1107028386124.575684|true|yard duty|holly hernandez|2074-04-28 17:44:36|2063-11-06 06:22:44.6225769|2093-06-28 +19|24718|206454818|-8714995808835444736|-1117.94|927068.77|2436455705629.079102||zync studies|rachel allen|2056-07-19 22:03:52|2070-12-29 12:33:01.98160945|2009-04-12 +73|6320|618991041|7782245855193874432|-3179.76|4428479.65|-1766529550238.971191|true|nap time|quinn underhill|2027-03-26 09:37:03|2029-11-16 13:39:01.338991567|2025-01-09 +13|-9735|1216287232|8525894870444638208|-15016.32|680005.58|3656798201330.459961|false|joggying|jessica polk|2030-10-26 01:34:23|2058-04-21 17:20:17.602770253|2027-08-25 +-21||-1319753324|-7661192563533062144|1711.74|3518736.19||false|mathematics|yuri ellison|2048-08-17 16:08:06|2014-05-23 03:03:06.298424630|2044-07-10 +-90|-18928|||18298.73|563291.79|-1649989772280.555664|true|undecided|zach garcia|2035-01-29 13:36:22|2020-08-14 00:42:19.544792085|1996-01-08 +2|11664|-618505946|8995562121346260992|18316.47|2461394.35|-2061551909115.699707|false|religion|nick white|2050-10-24 09:24:48|2031-10-08 17:37:41.971088663|2094-10-25 
+-77|7153|1905812339|7626715182847090688|-11311.74|-4552908.75|-3911290151123.445312|true|joggying|oscar polk|2056-09-13 01:19:58|2021-07-16 15:35:10.320557033|2010-02-17
+94|18535|1316369941|8146492373537660928|36988.38|1721002.37|2462069544028.686523|false|topology|rachel ovid|2019-09-21 21:25:49|2036-11-12 20:08:29.22887887|2032-11-27
+-12|30154|879500678|7682327310082531328|11783.63|-4207905.62||false|debate|mike king|2059-09-03 09:28:25|2063-01-16 10:26:39.947556321|2026-10-31
+-17|3226|1556919269|2968|42196.17|-2451091.00|3876386740696.417969|false|american history|yuri white|2071-04-10 11:05:18|2025-06-10 10:58:45.628035336|2011-03-02
+-100|1211|1304431147|7309156463509061632|-5463.47|-3681795.57|-4673125472245.232422||yard duty|priscilla thompson|2045-08-28 21:02:47|2030-11-20 12:05:21.353091621|
+-53|7537|-316678117|1955|-37373.34||-2750380640224.783203|true|american history|holly brown|2064-03-24 07:42:48|2057-04-29 16:14:28.328830406|2060-09-08
+-99|8709|485105934||-36985.66|-1599375.73|2929377009755.462891|true|mathematics|irene laertes|2069-10-02 19:47:09|2038-02-24 19:37:41.765155541|2059-03-27
+93|505|-1096013673|7022349041913978880|19589.89|4262793.77|2004853350974.933594|false|joggying|nick falkner|2035-06-07 07:11:03|2072-09-16 14:06:47.711300624|2011-11-02
+105|7066|-1669227632|7045967493826387968|-10913.07|2599993.53|-396014088839.784180|true|nap time|calvin robinson|2030-03-31 02:18:35|2069-09-11 23:51:27.564297669|1998-11-04
+114|364|1328225044|3006|-38855.31|-1381463.11|-414382889388.811523|false|undecided|mike garcia|2073-09-01 12:10:50|2057-04-14 17:54:10.432274393|2047-01-01
+-90|-8685|919363072|65|3505.36|2568021.21|-3721923641458.708984|true|history|mike zipper|2056-05-01 00:25:46|2048-03-07 19:09:13.934285589|2053-12-15
+-50|-8286|-303747347|8168742078705262592|8746.11|-172.75|-439942020547.228516|false|topology|ethan nixon|2034-08-05 20:00:57|2070-03-29 01:10:47.734976157|2034-12-01
+59||1309976380|7212016545671348224|3338.18|-2832523.12|-2630495393711.740234|true|topology|victor davidson|2037-04-04 00:12:45|2060-06-21 08:52:55.142356530|2044-04-08
+-49||503752931|8079573715140485120|41917.77|3087402.10|2544461831013.400391|true|nap time|irene miller|2028-04-10 21:25:47|2067-08-19 02:13:08.73237559|2094-12-05
+-91|-10452|771827308|3965||-26770.97|3302343687377.344727|false||luke white|2077-08-04 16:05:43|2062-07-09 21:09:41.190969035|2030-07-31
+29|24105||8555933456197828608|-39584.52|-3148113.96|-3288325057617.243164|true||quinn zipper|2060-03-15 18:47:53|2038-04-23 02:03:51.16558012|2104-03-09
+|-14593|1552351592|2903|-34513.70|571155.32|-2582720634796.942383|true|education|gabriella underhill|2027-07-18 07:26:54|2078-05-25 05:22:02.627179287|2079-09-03
+2|-28551||7648729477297987584|-7246.75|1672329.45||true|history|zach miller|2060-09-17 04:27:39|2080-06-13 21:51:01.723250812|
+92|16270|1678220496||24969.88|4258335.94|2436944648874.294922|true|opthamology||2071-10-31 06:32:07|2031-11-22 17:50:26.879890489|
+-91|7579|-599396052|8223732800007864320|32479.80|-1379817.47|-4363720327233.622070|false|mathematics|quinn zipper|2053-08-03 12:28:58|2044-05-04 09:20:03.435933385|2105-05-15
+113|27982|622925063|-7412431471807283200|27846.84|-4799720.31|1201203871144.684570|true|education|rachel miller|2023-08-10 08:06:20|2041-09-10 10:42:44.371838149|2047-04-19
+13|23076|-1493282775|2560|-33310.59||1075000742069.072266|false|quiet hour||2059-11-21 19:51:29|2060-07-25 19:29:26.553466747|1972-05-27
+0|24600|492639283|2988|-43127.18|3448574.72|4019403441222.486328|false|xylophone band|bob van buren|2050-04-11 14:17:31|2072-06-28 09:30:55.249854691|1992-08-12
+63|-28374|1938788165|1243||-2660580.87|-4486948076141.814453|true|debate|quinn steinbeck|2066-05-13 21:20:53|2027-03-03 05:50:34.618523994|2076-09-27
+77|26749|290921475|1837|-15170.73|820623.92|-1288092098718.465332|true|american history|tom thompson|2034-04-05 15:57:02|2019-02-26 13:01:35.124414919|2004-11-01
+-72|-4451|-44426049|7014537632150224896|-5534.73||3805841621723.792969|||victor polk|2052-01-03 17:23:12|2013-11-30 17:54:56.483304338|2004-08-08
+-114|16062|1001732850|3747|-17473.64|3821933.91|1957794439377.628906|true|yard duty|yuri king|2075-12-22 04:38:30|2062-03-15 00:13:49.777150036|2053-09-13
+38|-28867|-675125724|2682|-19280.61|-2777246.20|-2757792116831.687500|false|xylophone band|nick garcia|2019-09-08 16:20:00|2076-01-24 15:05:41.840638133|2082-07-16
+52|-22923|1815882183|8073733016154431488|16599.35|3735828.94|-2623555786538.993652|true|forestry|fred robinson|2022-08-04 04:25:39|2030-02-01 04:32:16.711994446|1992-01-04
+-44|-24561|-2032576637|2938|5439.14||-1287352845163.880371|true|chemistry|quinn nixon|2079-12-24 20:06:15|2036-12-17 21:09:27.36962995|2091-08-21
+-114|101|742059797|1312|5409.29|1360964.78|-1132833175493.459473|false||quinn quirinius|2062-08-23 19:08:32|2021-12-24 21:33:32.33570035|2042-06-28
+80|-398|1614297403|7006803044329021440|19206.68|1631603.75|1928583868733.362305|false|quiet hour|zach falkner||2049-07-01 05:55:21.266193316|2050-08-22
+101|14261|-1831957182|7701723309715685376|16762.34|-4076580.24|-2252083222096.885742|true|geology|sarah garcia|2070-04-13 10:50:30|2033-03-22 15:50:00.469249022|
+100|27999|550186724|7528074274555305984|-35626.28|2745675.76|4328216287180.062500|false|geology|jessica polk|2061-11-14 01:18:50|2074-11-13 16:09:10.889730571|1996-04-04
+100|-26433|1752520642|-7532751268425261056|16480.71|4007230.14|-1044307328651.719238|false|opthamology||2043-05-29 05:54:38|2074-02-12 05:39:52.488342753|2028-06-02
+111|4989|1251556414|8000440057238052864||3463290.18|4016113189650.382812|false|wind surfing|xavier ellison|2080-02-28 20:08:17|2077-10-06 12:41:21.147411339|1997-12-15
+-116|-18867|661659208|-7964801953178091520|-24442.39|-118153.32|1736698204716.517578|false|topology|mike nixon|2051-08-08 05:45:58|2071-08-16 07:04:53.257509570|2002-09-08
+10|-27667|-121162464|2846|18054.50|-4206613.37|-600768837741.725586|false|linguistics|luke davidson|2039-02-06 13:13:48|2042-11-07 11:25:02.115910277|2004-10-29
+-8||144499388|8723248113030782976|39473.99|-2235351.00|-18073418814.629883|false|joggying|zach miller|2062-05-31 10:37:48|2028-08-15 23:55:00.469898799|2033-12-07
+|-6396|-1425942083|7440265908266827776|48921.57|-3425386.51|269520612020.599609|false|zync studies|katie zipper|2059-04-29 03:48:13|2015-12-15 17:22:12.144511972|2039-01-27
+84||1044196568|927|24507.82|1204458.07|4090713174235.851562|false|opthamology|holly white|2025-08-28 21:46:05|2070-08-29 00:11:17.90785157|2079-09-17
+112|21991|-507250351|-7063777488249085952|-44754.93|2554931.12|655328886793.391602|true||ethan king|2057-02-14 01:22:09|2078-12-10 23:03:32.538772565|2094-11-21
+-11|-16362|1002519329|9194388393453060096|39211.56|-4063103.95|-3072034659571.372070|true|forestry|mike king|2048-08-11 06:18:24|2041-11-11 12:40:59.941618852|1974-10-20
+-72|22837|-1366059787|7720187583697502208|-48151.12|-3450914.16|-1948883975974.809082|true|debate|bob ellison|2080-01-23 23:18:30|2062-11-08 03:12:16.244942252|2069-10-29
+42|22278|-1210550573|8557218322962644992||-592807.94|172454528768.888672|true|debate|bob king|2065-06-23 08:15:17|2043-02-10 01:14:31.94775978|1997-10-08
+37|-13601|-1541281934|950|-47881.13|3061715.02||false|forestry|bob quirinius||2064-08-05 14:59:22.721494566|2007-09-18
+-119|26880||2189|42902.58|-1855478.06|1876416410033.172852|true|joggying|luke falkner|2026-01-04 07:40:12|2038-07-03 12:41:54.561417526|2023-06-16
+35|15644|-2041825946|1371|19463.33|2808651.01|-2817350314681.874512|true|forestry|jessica falkner|2039-04-24 13:30:59|2038-04-25 05:54:15.298921853|2035-08-02
+-18|25621|-533281137|7370803940448305152|1823.12|-3105299.85|1337878505653.194336|true|geology|jessica allen|2020-02-26 14:46:30|2054-04-19 20:08:03.415116247|2080-04-17
+-62|18827|1332042427|-8914039133569400832|22457.88|-390811.18|1058172512204.009766|false|undecided||2023-11-02 23:13:38|2026-08-12 02:24:05.267416889|2053-08-25
+31|-1900|-886741158|3663|43963.59|-922201.79|2965135330582.456055|true|forestry|nick quirinius|2049-08-24 23:52:43|2017-01-17 05:42:20.191687490|2036-05-02
+-85|30376|1951869763|2341|14523.33|-4986916.22|369571844908.663086|false|debate|yuri brown|2021-12-17 23:49:47|2055-07-19 00:17:37.54325516|2001-01-18
+63|-22184|1650676897|-8877431933441327104|-9484.98|-233929.16|3503864204687.659180|false|zync studies|sarah nixon|2055-06-30 21:18:55|2060-01-10 06:54:39.966397150|2061-09-04
+||1996235654|8171188598958407680|9466.25|490686.49|1931170057951.732422|false|geology|mike underhill|2039-11-23 03:20:55|2049-12-20 18:53:04.478116336|2097-01-20
+119|-14405|350802495|8525336514806317056|17381.64|4634177.04|4944206888507.451172|false|topology|ulysses carson|2036-01-10 08:06:52|2070-01-31 23:05:08.931249487|1987-08-11
+99|-3257|142722637|1608|-24219.34|-4696898.06|629224139568.691406|false|zync studies|xavier quirinius|2022-10-11 20:58:33|2038-09-26 07:31:46.33104093|1977-09-10
+-91|20349|1796486238|-7094189393339678720||-1535920.17|3324497425612.911133|true|yard duty|bob nixon|2014-10-02 20:51:31|2040-04-19 13:25:58.223437075|
+-78|-9094|-1538978853|1752|-32761.54|2382823.04|3403982716083.717773|true|philosophy|||2051-07-08 04:17:48.731827233|2033-10-25
+-75|15532|1333148555|3084|21394.08|2588790.34|4071297905096.535156|true|xylophone band|katie allen|2067-09-23 04:05:50|2038-08-29 23:42:20.489635134|2036-07-27
+126|-10629|-362603422|3673|35461.58|2528874.81|4610594583850.691406|false|values clariffication|rachel falkner|2023-02-22 11:26:15|2013-09-19 02:04:12.907311135|2066-04-21
+86|32547|1566958573|9169248521377374208|-23486.84|2372148.79|2181996896360.196289|true|wind surfing|jessica allen|2018-02-09 10:50:39|2059-11-09 10:41:15.548959953|2039-07-16
+96||196980893|-7866079955473989632|-11204.72|-437773.76|1822512356055.412109|||mike johnson|2058-07-28 02:16:17|2032-12-27 00:22:34.913934544|2039-02-13
+-91|3796|-1949698319|-9004892183139811328|-37855.65|-722781.48|-1616992868522.899414|false|history||2023-11-13 14:49:42|2038-07-27 08:30:35.90734230|
+31|26298|596595603|1892|-22485.06|3408030.07|801948999916.781250|false|geology|tom johnson|2060-04-25 19:34:18|2061-07-29 00:57:25.191363010|2096-04-04
+0|-17840|1300798829|6928080429732536320|-37174.23|-1136523.28|-419767614914.228516|false|american history|katie garcia|2058-04-24 23:56:54|2040-06-20 20:48:02.848038196|2021-08-27
+-66|-23325|-1050029724|-7623047151287754752|23314.68|-4551669.79|-4139576769987.520508|false|biology|wendy king|2016-07-20 20:59:47|2070-01-01 11:42:09.518048339|2084-04-30
+12|21415|-270683864|2492|-16252.33|4564437.09|2651265482813.621094|true|biology|zach robinson|2025-10-26 04:01:53|2037-10-11 06:54:47.165238299|2098-01-28
+-122|-15748|1225312439|-7695491171376291840|28830.89|-4919238.40|-2404376113558.761719|true|linguistics|alice ellison|2034-10-08 07:09:50|2032-10-21 00:11:43.739687278|2093-03-02
+123|-14928|-507955215|-7797151404935618560|27514.87|-725071.33|1433886080161.706055|false|study skills|fred xylophone|2038-11-03 19:47:00|2063-05-26 08:16:08.948060507|2023-08-16
+-55|23205|1377144283|8208354137450766336|11166.57|-475828.89|-2586759307363.770508|false|joggying||2056-12-06 04:33:47|2029-03-25 11:29:27.458800505|2052-11-08
+18||1056997296|-7395553021620731904|-17607.35|3248252.23|-1769756977914.625000||kindergarten|jessica laertes|2024-12-17 14:31:48|2030-03-18 04:44:48.450861752|2010-07-28
+-26|-14223|1524010024|-8453491903284994048|10070.35|-4865960.77|-666807424402.868652|true|zync studies|zach xylophone|2078-12-23 06:47:29|2051-07-17 13:45:43.90586019|2006-05-06
+69|-30974|-1938290238|-7140008543769042944|21635.46|4758510.88|2531109393452.517578|true|undecided|jessica allen|2031-07-16 13:24:59|2077-12-14 00:47:31.918822245|2023-12-23
+-105|-18269|922373046|2724|-16100.60|-1222437.31|4712395084916.033203||forestry|nick miller|2065-11-13 12:07:18|2038-07-06 22:45:01.365350444|1987-05-18
+120|-4507|-1006768637|3443|20453.01|517019.66|-2741137648843.547852|true|biology|mike xylophone|2026-04-02 04:46:51|2018-11-19 14:31:22.984247686|2011-11-04
+-36|18439|-1180153422|-7512297136103800832|12541.07|-89508.07|1932456148150.265625|false||nick ovid|2032-02-26 03:24:31|2034-09-24 06:39:52.148462757|
+-71|-21820||9136234417125007360|-344.10|-4368885.38|-684785854994.736328|true|nap time|wendy young|2015-05-31 19:22:04||1999-04-13
+95|-9528|494570380|8192304692696383488|26081.03|596768.38|-3722020696135.146484|false|industrial engineering|jessica ichabod|2031-10-27 03:13:46|2064-10-16 21:17:40.991676060|2043-03-22
+-50|16693|758926227|8199513544090730496|4474.55|45041.27|1263477396015.986328|true|values clariffication||2069-07-20 16:00:28|2046-05-27 10:37:11.819233302|2048-02-27
+33|18430|-1850492820|311||-2016503.04|3247384437414.133789|false|xylophone band|victor miller|2077-03-07 06:54:23|2039-08-04 16:56:55.325072009|2092-07-02
+33|-20343|1802498539|-8488247955875618816|-29303.04|-1890856.20|-3463738392057.429688|false|education|quinn garcia|2050-11-23 23:59:12|2044-11-08 21:45:24.255164508|
+40||104004730||-16275.03|2426747.55|-3021968566495.978516|true|wind surfing|holly zipper|2020-11-10 22:32:59|2037-04-10 10:37:11.57768325|2046-05-02
+-32|8407|1103878879|2540|-26808.05|3807549.41|4657041550408.074219|false|zync studies|gabriella xylophone|2056-10-01 10:15:41|2071-10-08 14:25:57.523825353|
+113|5011|-310343273|586|-41183.23|-1362689.39|2787470317427.462891||chemistry|holly carson|2033-07-01 04:10:31|2061-07-11 22:31:08.650284032|
+-28|26108|-520725912|-7444070205513138176|-39827.23|-1034617.82|1315126089570.455078|true|yard duty|victor king|2051-12-04 23:17:35|2076-12-13 04:56:16.684586095|2030-03-17
+-39|16858|-540820650|1141|1528.37|-296682.71|1883927511605.208008|true|philosophy|sarah davidson|2016-08-04 09:09:08|2028-09-08 18:31:21.297991583|1991-05-05
+119|-12605|2017314998|-8076479329071955968|-42478.42|-1960693.57|1876586582839.436523|false|topology|oscar miller|2044-06-18 19:36:31|2045-03-26 02:55:54.860474103|2066-07-15
+-121|-11956|-1622653291|3103|22234.29|2234574.75|-3286846428382.545898|true|joggying|oscar hernandez|2055-08-05 22:01:21|2063-11-05 16:27:28.810214216|1980-04-16
+-17|-12575|-1655030261|-7629401308029976576|-37573.41|636487.51|-1629367133219.616699|false|opthamology|oscar ovid|2022-03-01 15:36:43|2052-03-07 22:20:20.205649486|2019-09-21
+37|-14093|-828522499|-7507424948896415744|17128.06||-1526420376110.131348|false|undecided|priscilla quirinius|2046-01-07 07:13:43|2031-05-20 08:58:10.221063212|1994-01-18
+95|1911|829055499|2821|-10616.79|4955854.91|2648891009461.742188|true|undecided|mike steinbeck|2017-07-24 13:54:39|2018-11-19 00:30:46.951990466|
+97|-12111|44628821|2017|46220.99|4937500.66|-4092236654348.724121|true||gabriella carson|2051-12-29 12:24:40|2081-01-30 03:46:20.707511017|2067-11-07
+-19|-8781|187718349|1134|-16918.24||-327614661830.679688|true||quinn thompson|2079-04-26 14:18:50|2035-03-05 02:02:29.658383331|2057-04-20
+30|5683|-414207254|347|-43541.79|146599.48|592881531340.800781|false|study skills|quinn carson|2054-10-16 19:16:08|2044-07-24 23:22:06.336643166|2003-05-03
+-108|-25959|1686537335|-7246123871306244096|-39465.61|-2661928.62|1241888355251.284180|false|values clariffication|zach quirinius|2060-07-13 06:05:44|2055-11-11 06:13:54.437966770|1979-10-13
+42|-7949|-244778184|2020|-32623.47|1298373.20|1029037158625.279297|true|values clariffication|luke underhill|2020-12-24 08:38:51|2042-09-09 06:54:14.918515325|2043-07-14
+68|-2441|-1545572711|1693|-16730.94|663322.06|4416782454362.753906|true|philosophy|victor underhill||2050-08-15 12:14:23.659037544|2067-10-09
+75||37730738|2020|-34070.67|1370438.69|-850139042535.751465|false|wind surfing|david steinbeck|2080-11-21 21:39:04|2038-09-30 19:31:28.756844490|2031-02-19
+106||950545385|8570983266408103936|22758.70|-4201837.10|-1699424366875.845703||forestry|priscilla carson|2073-04-25 16:34:56|2030-04-06 22:45:20.584837930|
+-88|27039|1002132158|2919|23490.75|2525524.66|3928772869222.925781|false|quiet hour|mike nixon|2019-12-11 17:04:01|2073-08-27 15:25:40.462946182|2004-10-10
+-50|11929|530274409|2283|47506.60|4940143.09|2238828032947.566406|false|forestry|fred quirinius|2028-09-24 09:24:05|2023-04-15 06:31:19.498177657|2003-07-14
+-59|-21662|1645753684|7534042483076857856|-43875.53|-334324.80|-2776240810769.579102|false|biology|wendy falkner|2055-12-22 06:56:39|2056-10-02 22:01:36.199925909|1972-06-11
+0|4408|-1890963712||26425.57|4759776.27|-1443059704302.029785|true|linguistics|tom ellison|2016-05-09 19:10:05|2077-04-19 15:53:16.473585161|2084-09-28
+-10|-5468|2081243058|8991442360387584000|44064.44|-2369659.40|36307500543.130859|true|||2019-09-06 02:34:07|2071-02-11 11:55:49.568890442|2037-07-28
+33||-841634659|-7240213957902663680|21211.00|-877153.72||true|industrial engineering|victor nixon|2023-09-08 23:08:11|2060-11-16 02:44:18.534642052|1970-06-09
+29|-2734|1712411993|3365|-32719.17||-4737288224098.889648|true|industrial engineering|quinn garcia|2050-01-29 13:04:08|2040-12-18 06:16:17.316160473|2091-02-21
+52|30457|734267314|1899||1888026.72|-700748526629.659668|false|topology|yuri polk|2049-09-30 23:25:29|2075-08-08 17:30:07.90714933|2003-11-22
+-27|||7199539820886958080|35226.37|1673550.29|-2699241246489.911621|false|yard duty|zach hernandez|2020-03-19 22:08:11|2052-11-18 00:01:35.951499290|2045-11-23
+-101|-24518|-1272838092|7165364563962191872|-48066.56|-3054747.78|-2131219068131.891113|true|education|gabriella brown|2079-08-27 22:44:32|2016-08-15 07:24:08.694827835|2044-06-02
+-55||-1240912824|8407869317250220032|-17548.91|4920882.43|3568874049008.385742|true|american history|david brown|2022-03-01 06:58:05|2018-05-09 23:42:23.404363629|2094-07-04
+-36|-17697|766737781|1489|2717.26|2404319.31|4568135577936.089844|false|biology|sarah zipper|2067-01-21 03:39:32|2049-10-25 18:40:50.369693130|2034-07-13
+64|-11848|663222148|2400|7793.51|-1080682.99|300292232080.962891|true|biology|quinn zipper|2073-05-15 02:20:24|2078-09-18 23:44:55.37728042|2004-07-06
+20|1728|-1168823523|-7037375807670501376|-6247.30|-4396836.00|262755105545.977539|false|american history|fred white|2053-10-13 03:00:08|2055-08-15 02:54:48.257689180|
+-114|26181|-2098078720|7235109456886816768|-4436.80|4060435.47|-2474429353072.800293|false|values clariffication|ethan steinbeck|2023-10-16 17:26:47||2102-12-17
+-55|-25166|1743671220|8569030475428511744|-17287.81|1544285.84|-1190092871797.787598||quiet hour|alice quirinius|2076-11-25 09:38:01|2074-06-03 01:03:55.294216048|2098-04-23
+92|28003||2067|34633.74|-4930157.23|-3335555451295.846191|false|religion|mike nixon|2079-01-28 08:13:47||1980-04-25
+-65|21932|-314935936|8332670681629106176|-18622.62|-3155966.42|4868108659966.345703|true|study skills|yuri ellison|2070-09-12 03:17:44|2023-12-05 11:59:25.166367744|2051-01-05
+119|25931|-53587991|168|35658.46|-4157897.40|-2306979416991.375000|false|undecided|mike xylophone|2079-09-16 17:22:30|2068-09-03 16:21:05.587837732|2088-05-14
+-49||-693207128||-43769.79|2528398.51|-2079376598199.473633|true|yard duty|jessica xylophone|2021-12-26 08:36:25|2026-03-17 06:08:03.892844960|2053-02-08
+120|27101|1314531900|-8300526097982226432|48717.50|2372843.18|-3228353162536.237305|false|philosophy|jessica king|2058-08-05 09:43:07|2041-04-13 04:48:06.347111885|2064-10-11
+|-32263|1883400319|8854677881758162944|-42999.08|3938337.09|-4183585055390.999023|false|chemistry|irene allen|2019-11-07 01:26:30|2036-06-20 14:51:38.975932153|2073-06-12
+-75|-31967|-1979314577|8779073705407963136|41170.81|||false|opthamology|oscar underhill|2016-01-25 21:12:31|2018-09-13 15:53:28.93779373|
+86|28396|-1880783574||-25979.37|-2652497.01|-249524600693.789062|false|kindergarten|irene zipper|2031-01-14 12:43:21|2076-10-27 10:10:51.289692366|2053-09-28
+37|20023|1260480653|7593521922173419520|42391.45|325699.09|-281050837679.686523|false|wind surfing|katie nixon|2024-07-30 19:49:55|2037-11-20 21:31:56.305630678|2032-05-05
+|10124|458190500|3555|-43980.79|-1410972.42|2719205864547.937500||american history|calvin xylophone|2048-11-02 15:06:02|2033-01-16 03:58:58.296488174|2042-07-02
+-89|22194||2968|31862.19|3002267.51|718239362696.628906|true|chemistry|katie hernandez|2041-05-10 01:44:37|2071-06-21 21:17:06.20060768|2016-09-06
+-62||-20639382|71|-5056.23|2248385.04|2325450822865.419922|true|yard duty|mike falkner|2048-04-01 06:30:30|2061-08-20 07:40:18.93677396|2080-02-20
+-54|31049|901084309|2393|23481.29|-3231060.30|-4760389487887.977539||history|jessica quirinius|2028-10-03 04:47:22|2066-03-24 14:12:25.94922705|2050-08-23
+|21984|1920863389|-8552393882631389184|-20661.88|2117914.48|-262663215330.007812|true|debate|priscilla xylophone|2070-07-14 14:12:48|2031-08-24 02:24:02.486874527|2100-06-10
+0|19001|1198172036|320|18663.96|-394282.16|4567968932801.917969|true|mathematics|ethan ellison|2020-11-02 00:34:38|2059-08-28 10:06:05.989463516|2071-01-29
+88|-12396|-1411407810|8782900615468302336|32641.27|-2101467.53|-2373302972664.106445|true||xavier brown|2037-03-03 00:01:56|2061-07-12 06:31:32.495489622|2028-03-11
+-17|32563|2042816480||24206.59|-3883539.95|2769310272723.864258|true|yard duty|zach steinbeck|2034-07-26 05:02:39|2046-11-12 00:34:51.872415094|1979-04-27
+92|-24208|987917448|-8490382417169408000|33571.66|-1153143.45|-2147955542320.723145|false|values clariffication|irene zipper|2015-10-26 12:17:26|2015-03-03 02:03:10.966601995|1993-03-17
+-47|23564|1483580941|2911|42508.07|1839055.85|4309368289572.041016|false|industrial engineering|oscar allen|2050-08-18 07:33:19|2068-11-16 17:13:39.739249836|1988-01-21
+-116|32324||1542|-32519.49|-1891257.69|-3357497966164.828613|false|zync studies|priscilla young|2015-10-11 13:15:20|2063-10-27 03:53:16.807003987|2021-10-07
+77|-30890||-8368269352975982592|-37870.40|-4424875.69|-2485766831660.250000|true|education|ethan davidson|2077-02-04 18:08:33||2066-02-09
+-16|-29722|-120692484|8979012655944220672|16015.48|-1558583.94|1798093982631.314453|true|geology|calvin white|2054-01-07 23:38:01|2059-10-06 21:40:25.109705225|1979-08-10
+72|-28682|-1937640350|-8028275725610909696|27820.59|4278117.13|3305435240339.917969|false|study skills|oscar robinson|2059-10-18 05:56:02|2016-08-15 14:19:12.999923248|2089-06-05
+-101|-28941|-1424770359|-7198372044947275776|-32523.67|840723.51||false|biology|katie davidson|2029-05-10 17:06:02|2026-03-27 17:04:14.921374328|2038-10-17
+-62|-16793|-656478771|-8425998949410889728|-15790.92|-1652348.22||true|joggying|tom quirinius|2018-03-08 07:01:02|2063-09-08 22:40:07.263038797|2102-06-23
+98|14331|-1026458834|8515682078777081856|28545.71|3259711.52|-4509980330742.351562|true|values clariffication|irene ovid|2036-02-20 13:27:56|2060-06-23 04:00:35.209320146|
+-108|11811|1082837515|7359004378440146944|11265.92|-4077197.98|3301612877254.633789|true|american history|jessica carson|2063-09-12 17:06:41|2059-03-13 10:36:01.981783557|2070-02-13
+-6|-16647|1750433588|-7345562788132315136|-31541.83|-2723473.85|-4953353311913.595703|false|study skills|ethan ovid|2033-09-25 06:14:39|2016-07-18 15:13:32.628189033|1985-03-22
+-98|552|-1858443953|7060236714847412224|-45640.71|1417589.56|1093251681175.538086|false|xylophone band|tom zipper|2047-09-28 08:59:28|2058-04-14 01:07:55.788892889|2052-03-02
+72|11317|2018249426|204|11543.87|334348.22|-3211811412437.590820|false|linguistics|jessica steinbeck|2057-11-04 12:47:59|2052-01-19 05:00:53.429811881|2030-01-24
+-32|-32755|1472487454|-6992217501957169152|-47197.79|-801250.41|-891597604011.273926|false|religion|priscilla steinbeck|2054-05-22 06:28:34|2063-10-25 08:50:51.226706413|2071-11-21
+-16|20213|229688868|||914135.87|3370729811362.806641|true||tom davidson|2029-09-02 22:46:03|2049-10-29 01:10:18.861315724|2010-02-23
+83|-15129|-1242677422|-7779270198785875968|44820.44|1773593.38|-95551441731.167969|true|study skills|zach young|2063-12-29 05:00:16|2060-02-05 05:38:42.751856432|2071-07-18
+-86|-5829|-591879497|8473699639908261888|-43294.89|3526676.89|387330241230.863281|true|education|david garcia|2056-03-14 09:04:34||2024-05-07
+49|-25301|-1339495001|-8047774491688255488|32430.96|-1138428.01|213917180471.554688|false|joggying|wendy ichabod|2036-06-18 18:02:11|2078-03-05 18:23:13.53272383|1970-06-09
+-85|-31707|2145269593|3932|1974.27|-2242738.54|-3719487280001.985352|false|nap time|priscilla steinbeck||2067-03-15 02:27:42.845093748|2000-09-02
+41|29011|94220511|-8694818694700048384|46485.77|1092603.94|19208466388.569336|false|biology|oscar brown|2075-07-26 23:30:09|2015-05-22 08:58:32.953962204|2013-08-06
+-7|-25698|-939348081|-6968771079156654080|-40596.72|-3926229.46|-4294069592964.184570|true|religion|mike johnson|2036-12-06 17:44:24|2069-01-05 01:45:55.46640402|1971-09-02
+-87|-13499||7078641038157643776|-34520.93|4813321.50|-3935312554539.152344|true||jessica white|2054-11-17 14:06:49|2025-01-03 16:58:11.555739266|2011-12-02
+-92|-23671|33234633|2011|8452.90|-3704929.28|245030641872.894531|true|values clariffication|yuri van buren|2069-07-03 22:56:12|2029-10-25 23:23:56.717999797|2051-04-05
+|-14973|-1004204053|7223569671814987776|19550.67|-4154920.52|4425449699512.142578|false|geology|luke johnson|2022-06-29 03:58:00|2032-05-22 20:58:43.668140773|2077-12-15
+105|-4218|52667480|8071961599867387904|-29199.81|2499689.99|-2059563163384.368164|false|industrial engineering|david ichabod|2029-06-29 02:09:15|2066-11-21 08:11:25.882556182|1990-03-17
+64||888896424|535|44832.26|-1759444.28|3135189054105.787109|true|philosophy|zach white||2067-01-24 22:43:40.831103313|
+-9|-11160|413090363|928|31337.77|4617801.75|2620515827701.019531|true|education|rachel king|2045-01-12 05:23:03|2024-12-12 16:34:07.506230777|1986-04-23
+86|12271|-387395264|-9142610685888192512|-18646.76|4150790.60|2678768130285.895508|false|biology||2031-11-11 05:35:07|2065-02-07 23:08:09.482878471|
+-86|-11286|-1032306832|3606|36391.72|4919121.16|-4505856773264.867188|true|yard duty|katie davidson|2030-05-08 05:54:34|2031-05-29 03:15:14.948088213|2021-03-09
+-79|31581|615619268|-7751427073017544704|-2561.67|2023322.69|896698823749.275391|true|kindergarten|xavier underhill||2063-08-21 15:54:32.914330692|2057-05-05
+61|-11158|-583908704|7710447533880614912|45288.62|-4348649.88|-527665816963.711914|true|kindergarten|fred allen|2064-01-02 02:56:41|2058-06-12 20:59:19.253673990|2088-11-14
+87|1387|1377359511|3707|42065.36||-2837213968034.148926|true|mathematics|ethan davidson|2029-08-31 03:24:26|2029-12-28 20:17:31.312188949|1997-02-15
+-1|17534|-846450672|-8544299740525461504|23709.75||-1741610321411.606445|false|history|david white|2038-03-20 16:38:39|2069-11-16 01:49:17.829383874|2004-01-25
+|-20100|-1280919769|-8525212657458348032|-10581.34|3561523.46|-1289720339267.503906|true|xylophone band||2077-08-20 13:55:16|2047-03-12 14:26:07.332611637|1978-07-08
+|30216|-2122509553|3060|24735.37|-4144169.06|3475578255528.653320|true|industrial engineering|nick nixon|2067-06-16 11:31:34|2066-08-08 10:42:51.186855806|2060-01-13
+48|15626|-829717122|7414865343000322048|29695.82|2755306.54|-1698346362689.697754|false|wind surfing|||2072-08-01 17:29:19.788306199|1997-10-19
+69|-7058|-370798230|2358|22518.56|1965122.27|2422938575571.791016|true|topology|katie van buren|2013-06-16 02:16:22|2015-11-07 23:21:55.271471827|2102-01-11
+-52|30926|-1679120527|2824|31807.87|-3898871.20||false|philosophy|jessica quirinius|2030-09-03 08:07:19|2023-06-28 14:38:10.444522865|2102-12-08
+-120|22678|-707108808|8388363436324085760|21821.03|2538757.59|1962480701366.154297|true|biology|quinn johnson|2064-09-03 19:24:30||1976-01-27
+-114|6893|-1179668872|2085|14863.00||-1986143165196.629883|true|debate|wendy garcia|2050-01-29 18:00:33||2086-09-09
+32|4138|1336365018|364|35364.65|-2496609.74|4311249096001.646484|false|industrial engineering|gabriella hernandez|2055-09-24 13:37:44|2054-03-17 08:45:41.907691942|2079-04-24
+-115|-240|2133492883|8279056098670198784|-28232.35|-3840662.29|-859212665814.937012|false|yard duty|gabriella thompson|2028-09-07 02:14:16|2057-08-18 19:48:06.486182244|1973-09-22
+53|-15605|-1873004551|1856|9248.36|1828699.64|-2363428449892.879395|false|wind surfing|irene zipper|2028-09-27 21:17:23|2044-10-11 13:36:15.610377306|2076-01-11
+-105|-14667|-1406691044|-7824788571789279232|-30071.25|-4650363.84|-4748916605973.221680|false|topology|fred johnson|2015-10-25 20:09:26|2076-11-04 09:42:42.444252544|1982-11-13
+16|-29285|1273798925|8983857919580209152|-29449.44|-127752.12|-3009169710510.590820|false||priscilla thompson|2032-11-06 11:23:29|2020-02-18 10:32:14.521626327|2045-10-22
+1|28656|-19116270|-8584520406368493568|-28472.44|-731836.46|2544337191828.027344|true|linguistics|zach davidson|2046-09-29 07:58:30|2023-03-16 03:49:09.744554213|1981-02-14
+-98||1290381132||-42137.32|4322498.49|-977515822924.530762|false|values clariffication|xavier falkner|2031-05-16 13:51:42|2014-07-01 18:04:03.182673788|1985-11-21
+-66|11363|-191434898|383|9121.80|-1720548.35|-991233598328.165039|false|quiet hour|sarah underhill|2027-01-01 14:41:10|2053-07-11 23:10:08.4858029|2060-06-17
+-105|22150|1767019352|1066||-3722136.99|1483263131626.095703|false|education|yuri carson|2069-08-07 19:50:56|2027-10-17 12:50:48.598976231|2074-09-03
+-30|3891|-1758125445|3945|45738.36|-3046121.88|185310650268.100586|true|wind surfing|ethan brown|2067-04-17 00:25:17|2061-01-17 18:29:01.401347849|2055-11-25
+63|28748|715333063|501|-25215.89|647868.74|1620505790042.265625|false|wind surfing|mike young|2018-06-29 18:39:29|2046-10-06 18:13:16.513478823|1982-09-09
+87|-8293|-728015067|8146288732715196416|48883.00||-2873441557625.042969|true|debate|||1970-01-01 00:00:00.347751099|2053-03-11
+117|-31857|1756592797|2855|46735.27|-1464074.84|3917290352543.716797|false|philosophy|luke ovid|2036-05-12 12:17:10|2068-05-12 21:27:08.727324631|2091-06-08
+125|26867|746904285|820|8859.49|-4406626.41|-2643457571558.727051|true|linguistics|fred laertes|2016-06-30 20:12:26|2043-04-29 11:43:58.291833429|1977-12-15
+27|21904||1454|15105.83|4822404.58|-4286981451283.615723|true|nap time|jessica johnson|2067-08-03 19:22:44|2037-07-10 21:53:00.185589685|2095-07-17
+-6|312|430686478|9165199002069458944|-46955.95|-871722.65|-2559271129977.851562|true|industrial engineering|ulysses polk|2068-09-10 20:32:04|2029-05-16 06:34:15.82366094|2055-11-15
+37|31740|-203911033|41|-26556.12|4169708.31|183699834451.865234||chemistry|calvin carson|2028-02-06 07:54:37|2028-02-05 21:09:50.19666416|2010-02-22
+-24|-3309|1304812803|1509|-893.76||817275082175.292969|true|education|ethan quirinius|2023-08-21 13:22:03|2050-06-08 13:42:20.451205969|2094-06-21
+-59|23683|-1875699183|-7572262898020278272|-14589.14|-1374955.49|686549487454.719727|true|chemistry|bob polk|2066-10-27 01:03:19|2021-03-29 05:39:42.725451255|2062-05-07
+-105|2234|-1949359208|-7172594404186693632|37975.31|4445357.92|-2147069768093.352539|true|philosophy|alice carson|2062-06-17 16:47:40|2065-11-28 13:20:07.347051756|2053-05-03
+-8|-23462|-448060992|-7329767178250018816|9793.03|-2305056.98|-1231123023969.225098|true|quiet hour|alice miller|2031-08-17 22:10:06|2020-06-01 22:04:30.213218147|2053-11-11
+2|32133|-1784633305|-7507578199583694848|-29143.91|-323337.95|2015902300426.439453|true|study skills|zach laertes|2019-12-23 23:20:15|2077-03-17 01:02:35.649486371|2081-11-30
+|-28519|-1471147786|2009||-1526976.13|805374371411.496094|false|education|nick brown|2064-04-21 21:22:34|2015-12-21 22:24:04.431747619|2065-11-13
+|-7035|-1446132523|1948|36405.40|-263484.12|-2463355069754.271484|false|history|ulysses polk|2035-07-18 19:32:16|2074-09-07 21:49:22.240577143|2088-05-28
+92|-17356|-1213081886|-7782344916178796544|-48963.37|-1119756.54|-2978537429335.048828|true|kindergarten|fred allen|2060-10-15 20:23:24|2034-04-10 22:54:32.825495552|2066-01-08
+90|-30360|1273877405|7761834341179375616|-32576.28|-1904827.22|-4237985572934.519531|true|nap time|calvin nixon|2062-11-07 07:07:18|2014-10-18 10:24:23.318810654|2006-04-18
+93|22594|1436480682|3848|-46647.05|-846024.96|4197387495290.669922|true|religion|ethan falkner||2013-06-20 22:21:25.700551568|2017-08-21
+-101|5539|-1903090602|2599|-44605.59|-906161.97|-2765462365306.454590|true|chemistry|sarah ellison|2075-05-11 22:13:41|2067-08-09 04:55:46.596194633|2060-08-27
+-5|13247|-1568536214|-9148197394287779840|-29904.81|1018173.54|4363867341701.669922|true|biology|ethan ichabod|2055-11-13 07:21:13|2037-12-19 14:40:17.800815865|2090-09-13
+62|23881|-891543038|3024|1467.79|-3115534.46|2777793102100.396484|true|history|holly carson|2018-08-19 13:12:04|2021-03-17 04:26:22.691697033|1989-04-21
+-113|-28689|-1952235832|7569249672628789248|48800.65|-1818362.64|-2318038795882.877441|false|||2054-09-18 20:45:31|2039-08-25 07:17:12.897422973|2053-03-09
+-83|30532|596280431|7175638927948562432|-35548.30||-3309264029290.908203||undecided|bob johnson|2019-05-11 05:04:25|2044-11-22 10:00:42.386664113|
+-73||1958701268|-7228589258642194432|29244.34|-3241676.66|2306452150855.093750|true|forestry|ulysses miller|2070-03-02 04:16:19|2047-10-21 14:48:00.965133636|
+|12291|1126157283|2155|-41026.90|1949445.64|-1213071720047.551758|true|chemistry|wendy xylophone|2038-11-14 00:48:10|2051-12-25 04:00:41.572207865|
+-98|756|667283966|8011602724663336960|-48164.54|342083.00|386944022279.926758|true|debate|jessica king|2069-07-28 06:10:12|2063-01-16 17:33:44.523152273|2088-11-19
+-13|25777|-1419573027|7195454019231834112|38029.66|-4972189.08|4521077673959.546875|true|study skills|holly ichabod|2052-08-13 05:29:21|2038-09-14 22:21:59.558718770|2011-10-18
+53|-13226||7549858023389003776|-12346.33|-1579477.07|-2155923560071.712891|false|mathematics|david ovid|2063-09-13 03:10:59|2065-01-19 23:19:59.816489363|
+33|-17944|-512198016|7528211148397944832|14920.31||-2424496043950.270508|true|quiet hour|katie miller|2033-09-08 10:28:36|2015-12-02 08:43:04.431041576|2007-10-13
+-53|20540|-1147471772|-6962292590214234112|-41875.83|1829704.68|1112609353807.556641|false|biology|quinn polk|2025-04-16 13:33:21|2069-08-23 02:10:00.313463752|2046-06-15
+94|-11528|-2112149052|7599019810193211392|-41807.04|1368791.92|-3671181100745.925781|true|joggying|calvin robinson|2072-11-24 03:34:25|2075-02-15 01:37:03.892196000|1985-09-12
+-104|30201|805672638|1769|15342.01|-3063807.66|843728646940.320312|false|history|irene thompson|2015-08-28 06:41:13|2055-02-21 14:07:12.947865503|2011-03-25
+-90|-14652|731241198|1321|-34809.80||-2346155655585.455566|true|american history|xavier davidson||2049-07-25 21:35:24.211530780|2004-04-14
+-65|153|-1403154847||-1983.91|4380789.10|932489706513.735352|false|quiet hour|luke ellison|2039-03-25 08:36:16||2036-09-12
+27|7899|-1247325089|-8445801063348281344|-23834.19|708792.89|284261185960.055664|true|industrial engineering|calvin falkner|2028-02-12 19:38:01|2072-11-08 21:30:05.175267657|2072-12-30
+29|-14815|526502851|-7340231535789727744|-45069.61|2238717.32|-4453121016505.043945|false|study skills|oscar king|2044-10-19 23:09:11|2074-12-09 08:39:18.539753783|2020-02-01
+-102|-3360|824235855|1518|5014.76|3618423.45|-3907271111640.789062|false|wind surfing|bob polk||2052-08-25 00:22:17.135058768|1978-01-21
+71||1216016081|500|-24898.26||-4315206138802.594238|true|quiet hour|ulysses garcia|2035-09-18 13:19:56|2078-07-15 08:06:13.367168984|2015-09-04
+62|12048|-1554130090|883|18314.24|1636916.18|4938317552481.664062|false|mathematics|sarah zipper|2060-06-21 13:35:18|2039-10-23 06:50:15.287634401|2006-12-07
+-52|-23527|-1516259168|4056|-23121.85|-693272.96|-4155565016160.593750|false|chemistry|priscilla falkner|2032-01-23 09:54:36|2073-12-17 13:37:32.263464187|2098-07-04
+|-27871|1406029775|1470|16901.01|3850495.43|10462215475.719727|true|philosophy||2069-08-07 12:12:03|2030-04-27 06:43:41.486321103|1972-05-02
+-76|31509|1318956413|-8959796625322680320|-27759.95||-1863526316147.314941|false|religion|victor falkner|2053-12-19 03:37:20|2071-08-04 02:01:41.490785966|2007-10-14
+30|4206|1260101584|9062227900376203264|42958.29|-4035840.58|410251458493.747070|true|quiet hour|yuri allen|2017-01-17 18:08:15|2055-09-10 22:41:08.576594494|2046-12-02
+74|-28914|700341242|-7751265769984491520|-6754.81|-587473.64|1873443121269.744141|true|joggying|victor king|2070-02-16 19:10:24|2051-04-10 15:13:10.310012763|2061-11-02
+56|19568|-1660344634|1890|40490.24|-4694708.34|-506769976049.526367|true|american history|nick polk|2016-11-16 04:06:28|2077-10-05 18:56:18.80522682|2084-10-25
+-52||1076088102|3533|21809.68|4267756.81|2517234933838.885742|false|study skills|rachel quirinius|2061-04-10 02:13:09|2072-08-27 09:24:32.83013561|2048-09-15
+-33|23844|-1422780798|1613||-267398.80|-4073545616969.127441|false|opthamology|bob davidson|2046-06-27 17:39:40|2066-12-16 16:42:45.480565956|2048-01-15
+-121|-696|1182390248|7031339012080549888|49883.91||-3728061292967.872559|true|quiet hour|fred xylophone|2022-10-08 06:48:32|2021-08-28 05:40:25.870268434|2060-06-29
+7|29851|-1534307678|9048297564833079296|-29401.73|1648001.02|4314568174815.570312|true|linguistics|yuri young|2028-02-27 05:29:57|2063-09-06 20:08:05.406472821|2103-06-16
+-40|22655|963854010|7084659344078970880||-4605201.19|3207503926905.630859|false|kindergarten|mike robinson|2063-05-21 19:20:41|2044-03-13 05:02:40.948065998|2017-04-22
+76|-27707|1825828852|3672|-22118.16||800944762049.660156|true|undecided|quinn white|2062-06-23 12:09:01|2021-07-28 14:07:29.294310147|2084-01-12
+|-29171|740883263|1450|-9658.32|3055310.97|2204128352085.490234|true|mathematics|quinn nixon|2074-05-02 07:09:00|2072-02-02 03:56:13.682965502|2018-05-02
+-113|32581|453613037|-8142667274351345664|-2527.26|-1803737.40|2147384303392.172852|true|education|holly allen|2042-08-22 12:22:12|2078-10-06 00:10:58.709502336|1975-01-14
+-87|5190|1516236846|7486884806277611520|-11934.38|176111.39|-1700879323340.273926|true|industrial engineering|bob steinbeck|2070-10-23 03:53:03|2019-03-22 16:39:15.979672249|1985-02-15
+-77||1902676205|8514979402185596928|-26341.94|2040150.16|4742828481026.998047|false|education|david brown|2070-07-18 17:27:10|2013-09-22 00:59:54.600916680|1974-03-14
+-98|5549|-1067083033|7212090742612467712|-49625.17|4761212.86|3679966124819.919922||topology|tom davidson|2078-05-07 20:28:35|2066-07-28 05:59:34.33166299|2056-08-14
+54|-15936|166320811|9211455920344088576|12047.69|-4099704.00|-2128007235542.526367|true|religion|nick allen|2045-10-19 13:39:33|2016-08-24 16:45:54.184319832|2000-12-05
+81|-22937|176792505|22|48719.83|-1651346.83|-4968050069316.731445|true|topology|oscar thompson|2057-06-05 23:37:14|2036-03-18 10:37:46.993072194|2081-11-21
+107|8095|-2147071655|112|48797.43|-1977864.91|-146700825796.983398|true|topology|rachel falkner|2050-01-15 16:01:56|2065-03-19 22:15:14.409248254|2040-11-24
+6|-15576|-491377296|3203|41853.04|-2547601.36|-3932105722522.933594||geology|calvin zipper|2047-12-23 17:32:41|2056-09-01 05:40:36.226800355|1991-10-09
+90|32669|1618123796|2850|13386.45|-158319.54|-2994263442674.131836|true|zync studies|ethan ichabod|2063-02-27 12:34:18|2037-08-18 21:26:34.653592100|2002-06-25
+42|23624|||-35607.07|-3856180.80|-1283909858208.360840|true||bob davidson|2056-11-01 07:08:35|2027-06-21 22:59:34.332774800|2069-09-24
+11||-462541618|3333|-47939.71|-2853027.07|2406489217263.256836|false|topology|jessica xylophone|2062-04-10 20:41:17|2023-01-12 01:05:51.508185273|1998-02-16
+79|-11006|1363568842|7871189141676998656|28903.24|-3332708.82|-1243576018775.911621|true|mathematics|ulysses white|2074-10-12 09:54:57|2040-01-01 03:10:35.741193113|2029-11-18
+-79|4397|-1625062942|1796|12877.59|-2482414.22|-366729280142.763672|true|history|tom young|2066-08-06 02:14:55|2045-05-09 01:30:52.296126909|1976-12-03
+82|5196|-1505397109|8853989376829833216|-41663.78|-4887052.76|-3943258984957.225098|false|quiet hour|rachel xylophone|2056-06-13 12:15:30|2071-10-05 12:34:48.394333775|1973-06-18
+7|-18672|-1881263242|7065344324692443136|-2559.24|-4057157.83|103436176467.041992|false|xylophone band|ulysses zipper||2042-01-17 05:21:30.712047647|2091-05-06
+-90|11877|-1628799508|7775034125776363520|17239.97|3011536.81|-306501592876.848633||biology|alice underhill|2033-02-26 07:08:07|2044-03-17 21:28:59.879032353|2099-11-19
+|-1335|-1423477356|7308289763456000000|-20504.78||-867620120929.686523|true|zync studies|katie ovid|2035-03-18 17:10:14|2059-10-27 08:45:21.483045033|2013-09-20
+79|18857|1224662770|-8881446757271846912|-5915.38|1668877.19|54423726527.943359||mathematics|calvin van buren||2013-08-14 01:50:13.516978015|2101-08-08
+-8|20794|388707554|9016280522993975296|-3266.28|1632725.90|-4803569350262.506836|false|wind surfing|nick thompson|2077-01-09 11:45:18|2023-01-13 03:39:22.546498789|2063-03-31
+46|-15874|-329336519|8374321007870836736|-4869.45|-3732055.57|-2899188334900.983887|false|american history|luke robinson|2074-09-18 00:29:16|2028-01-04 00:08:05.604429969|2016-10-27
+-119|-23137|-352146259|7054938591408996352|25059.23|3126403.10|901936833002.139648|false|biology|xavier van buren|2057-09-08 10:14:56|2027-01-22 20:10:36.408673736|1994-03-25
+-122|24455|-514010922|-8232763638546694144|-22580.56|3348247.17|4202677774880.996094|true|chemistry|yuri laertes|2073-11-28 13:28:08|2042-06-06 07:33:49.17122575|2060-10-26
+11|-21274|2102440065|9089435102788009984|-28578.33|2621188.65|-255054620124.468750|true|undecided|tom van buren|2066-12-02 02:53:22|2017-05-01 17:13:49.491721578|2035-04-30
+61|31948|66182203||40936.71|-3249322.84||true|education|quinn ichabod|2016-06-18 05:36:35|2029-01-24 19:44:04.24594440|2023-04-12
+-40|-733|1998185704|4054|-24854.87|-3604381.12|-3779922035606.979492|true|chemistry|quinn hernandez|2053-06-05 03:55:29|2023-07-29 20:12:48.388612071|2103-04-23
+-27|21792|686081268|2790|38457.94|2811951.74|-1741278698275.827637|true|joggying|holly laertes|2016-09-17 20:56:50||2082-05-26
+-26|10663|-1594957608||-34529.01|3296608.52|-1835124513932.413574||history|ethan xylophone|2042-09-02 06:50:05|2021-08-20 20:43:12.92751491|2002-08-20
+122|13265|-765190882|9207107990561972224|47360.00||-4819156991738.395508|false|xylophone band|mike white|2080-01-03 19:54:56|2041-05-28 08:27:17.127266228|2096-09-09
+76|17286|609917172|-7647020450676146176|-40328.10|4443378.81|-4230922395249.549805|false|opthamology|luke ovid|2056-10-23 05:28:13|2036-11-07 19:08:45.996173458|2099-04-07
+-105|-20752|985634256|7870277756614623232|-8573.19||3712080007920.981445|true|american history|quinn miller|2031-04-10 06:37:29|2079-07-14 10:07:42.590879861|2098-05-16
+111|-29811|-1345391395|-7138415011665043456|-43243.33|3559341.21|-615166651772.831543|true|forestry|yuri falkner|2038-01-14 21:58:13|2065-01-27 22:52:28.599612593|2082-09-30
+80|-18547||4075|7104.69|2204690.58|1224935205308.312500|true|topology|mike van buren|2036-10-06 06:18:48|2050-12-07 01:06:25.366922684|2104-08-30
+112|20348|1103797891|7077311975029555200|-44170.71|334056.60|-3129131862045.682129|false|wind surfing|nick underhill|2013-12-20 23:38:15|2042-10-16 10:35:25.793777443|2042-05-14
+|-28906|210728566|7295502697317097472|38326.73|-4441185.31|3215358777531.882812|false|religion|fred xylophone|2080-04-10 17:57:39|2045-10-12 09:13:28.174343501|2089-01-06
+-20|-20834|1257621270|9104574294205636608|-5156.07|2064139.93|3101600356212.976562|false|zync studies|katie miller|2038-05-25 08:10:39|2030-05-18 23:07:28.872160131|2006-10-08
+4|-7178|-1234163924|9131533983989358592|10162.69|-684326.75|541391666070.829102|true|opthamology|holly miller|2065-09-06 07:01:50|2032-05-20 21:51:43.880235085|1971-06-05
+-26|-30907|789871166|-7344146703223496704|35118.89|936904.69|750291107660.423828|false|philosophy|david ovid|2070-12-26 07:32:20|2062-12-16 23:06:43.116222435|1993-10-26
+-14|-7715|77063155|7130306447560826880|-34264.49|-31967.96|-1002469169364.878906|false|wind surfing|zach brown|2069-11-21 06:19:24|2076-05-23 07:42:52.682173527|2044-08-02
+-67|-16872|1754025802||33943.20|-2424320.14|-549393127536.781250|true|history|victor steinbeck|2052-12-18 00:08:22|2021-07-20 17:38:18.324036947|
+-68|9069|-579916775|8184799300477943808|-2578.18|2341699.48|1134499614756.023438|true|linguistics|priscilla ovid|2056-12-27 14:54:51|2067-01-23 22:58:00.339520560|
+|240|-1820436871|-8853553406533894144|11549.29|925416.12|-179699318578.983398|false|wind surfing|alice thompson|2059-06-19 06:27:50|2021-10-29 14:27:26.966421551|2007-12-23
+-45|-5338|881396599|9139805788041134080|-26482.69|2873812.27|3896505909246.314453|false|yard duty|luke white|2041-03-05 02:35:44|2065-05-28 07:22:17.384344554|2027-12-08
+-77|-13231|-38458614||-34256.32|-1535356.84|-2453335122672.513184|false|religion|calvin polk|2049-09-04 18:28:13|2041-11-20 12:24:05.687150484|2020-05-09
+25|-11675|-835002549|-7333362172439035904|37914.80|826088.22|2609781430749.009766|true|mathematics|katie ovid|2020-05-24 16:00:17||1988-10-30
+78|-32755|-1210261177|2476|-47317.89|4982060.93|-1235804549005.088379|false|chemistry|tom johnson|2057-03-25 09:18:17||2080-02-27
+-91||-758973175|9030480306789818368|42056.85|-4820814.85|-559313810281.701172|false|chemistry|alice ovid|2057-10-11 11:43:18|2053-02-18 08:06:40.270486504|2083-10-10
+-89|8721|-9958400|1999|-21802.89|2243832.21|-3650203891150.916016|false|education|zach miller|2047-07-13 06:22:16|2072-06-23 11:48:25.414890648|2047-01-14
+-37|7059|528218910||-18867.85|-41236.41|731421305775.334961|true|values clariffication|calvin young|2067-05-17 08:35:31|2046-01-19 02:21:03.547201945|2080-05-04
+-10|-27035||3962|-35386.39|3476711.79|-82867159743.145508|true|forestry|wendy allen|2054-03-08 09:38:48|2034-09-14 15:33:38.739682850|2004-03-18
+-29|23928|1083855659|-7476082621253402624|-25127.58|-1680003.18|-3185792319590.632324|false|debate|alice johnson|2076-03-23 22:55:05||2071-09-09
+-2|8675|-1731820254|-7989766326847807488|13048.95|4435172.95|1790513840581.020508|false|xylophone band|luke robinson|2070-02-01 22:03:42|2021-05-18 08:30:58.199152453|2094-10-18
+6|-25033|-393723522|1676|38559.82|2171322.14|2294980278961.240234||xylophone band|gabriella carson|2069-01-31 09:20:42||2074-02-13
+48|28973|-436386350|19|45498.19|-3819438.45|-2493337985182.613281|false|philosophy|victor young|2080-11-08 17:30:02|2025-10-28 00:58:01.851385860|2059-02-12
+52|28570||9084402694981533696|-29027.44|-530441.64||true|biology|holly young|2030-02-20 11:06:41|2026-02-09 05:24:47.977378166|2098-04-07
+-91|-3202|-913906252|85|14183.79|-1833054.72|-3376118593994.625977|true|nap time|calvin ovid|2074-12-29 21:01:27|2049-07-16 17:35:45.463721949|2089-11-02
+-86|-20894|75823003||46364.55|3697592.89|-1740232076211.789551||linguistics|oscar king|2071-10-16 12:28:18|2061-10-31 12:48:02.989589922|2077-09-12
+-75|11551|-1036720157||-36658.28|-3125140.09|1484228818170.123047|false||david king|2020-11-30 02:28:04|2054-02-09 19:31:45.358640573|2040-10-02
+-69|-19681|-1313618168|7678790769408172032|-31855.38|-1508399.23|4753945829836.361328|false|joggying|xavier hernandez|2037-10-27 05:17:05||2099-01-04
+-106|8922|2075919195||-23144.33|-4262662.44|4398332823245.781250|false|topology|holly hernandez|2077-12-18 11:03:27|2036-03-16 11:32:34.9687069|2075-01-07
+16|7939|-2037628236|2193|30061.15|-1980556.87|-3337375261599.789062|true|wind surfing|luke xylophone|2040-10-20 22:14:18|2051-05-30 16:34:46.186951645|1984-06-21
+29|29434|1202720813|3248|29432.23|597950.19|3511974955528.032227|true|kindergarten|katie ovid|2070-04-02 13:48:39|2044-11-02 03:09:50.932425383|2078-12-11
+-24|29892|-853967587|2194|-15719.12|1470631.02|-3556010863531.895508|true|religion|irene thompson|2077-10-02 19:15:36|2016-06-24 23:20:54.306578418|2105-10-08
+-71|9962|-946830673|8734584858442498048|-2685.04|-2038725.16|1250647379526.768555|true|nap time|wendy underhill|2029-09-23 13:37:34|2033-05-09 16:02:52.972794365|2071-12-04
+-114|18690|-99916247|8419958579638157312|1230.84|4853977.50|-3337430312348.033203|true|quiet hour|ethan white|2062-07-20 19:25:58|2029-03-04 22:25:32.372030233|2093-02-09
+51|-3436|-1459528251|8135164922674872320|-19595.72|837425.17|-2849980786453.798828|false|history|quinn ellison|2040-12-22 18:36:58||2077-03-15
+99|-79|-399643110|-9215144824304721920|-42449.75|508914.90|371694855487.127930|true|industrial engineering|ethan johnson|2053-08-08 07:04:47|2080-04-15 07:19:37.104699865|2089-05-19
+105|-25664|-1351437382|-7511952204985049088|-23273.93|-4798646.03|-3685975705841.393555|false||alice xylophone|2077-05-13 06:22:31|2023-11-22 17:50:18.908675615|2001-05-24
+-57|11843|-1240208945|967|-38329.26|1245240.05|-72632238324.530273|false|topology|ethan quirinius|2049-07-24 02:45:12|2060-04-10 06:20:53.456525992|1996-01-11
+125||1094778643|263|19292.63|-1873119.61|-3878455861390.826172|true|xylophone band|zach king|2064-10-03 00:12:53|2022-06-05 07:40:54.163193159|2090-04-22
+106|21976|-1144920802|3058|-42052.15|-2785909.98|4096964022421.367188|false|religion|ulysses king|2016-05-30 06:20:10|2021-10-30 20:52:05.468139074|2015-03-05
+115|-26932|-1432316859|-7712425776235274240|4449.85|2599136.85|4800598839440.474609|true|religion|alice laertes|2016-12-24 20:28:04|2020-10-30 17:18:06.83792354|2078-07-21
+110|13620|541923995||42103.61|-4674178.28|1720893396001.876953|false|chemistry|rachel miller|2075-04-16 08:04:13|2074-06-07 05:35:00.512374118|1972-06-09
+26|-14597|-453739759|738|47183.62|-2119967.88|887498368380.796875||debate|priscilla king|2039-11-15 02:45:05|2036-09-19 23:40:36.465715138|2071-11-05
+-117|-15708|2090044777|-9219066990552760320|6624.71||3101731188821.652344|false|zync studies||2020-10-12 16:15:01|2043-01-16 06:42:02.148351784|
+|21611|121663320|-6941777546186579968|-11619.88|-4343606.89|-4320082997633.427734|false|quiet hour|bob ovid|2018-11-27 08:43:27|2041-02-04 14:27:46.359838893|2060-09-18
+0|-31365|950997304|1940|-13193.86|3767689.76|-2691146668859.509766|false|nap time|nick thompson|2066-11-12 23:31:11|2054-08-03 11:46:51.845164637|2013-10-21
+|6367|58313734|874|-40233.99|2400153.04|2583800042778.818359|true|debate|fred ovid|2034-12-16 09:40:00|2044-03-21 13:20:12.629260168|2049-11-02
+22|-17642|491758252|6934570741217755136|30293.17|-327841.35|662367380091.879883|true|mathematics|tom falkner|2042-08-31 04:48:48|2077-04-09 00:12:01.319049463|1990-10-03
+-63|16357|641695802|-6947955278050181120|-42902.63|-749433.19|-2227447197725.922363|false|values clariffication|victor davidson|2030-01-09 20:09:55|2017-07-28 09:06:35.882903381|1970-06-23
+-104|5093|14573904|-7037638331316469760||-4901719.01|310870329637.787109|false|quiet hour|gabriella thompson|2080-04-13 18:39:38||2066-08-05
+-47|18810|514046604||-38437.95|1021458.26|366223182729.825195|true|joggying|mike hernandez|2026-10-10 18:47:04|2029-06-30 05:09:50.574924351|2076-02-27
+90|-8309|-298221893|9078604269481148416|-8052.94|-2122723.08|3538556731222.399414|false|debate|fred zipper|2077-11-25 18:23:08|2034-09-24 23:20:54.523734503|1983-10-09
+6|-26439|-1262842192|-7797149520019062784|30563.30|-2562846.00|64345321594.530273|true|biology|ulysses quirinius|2022-03-13 02:30:45|2024-12-31 19:28:56.388733281|2009-02-10
+-57|-31163|-1171326281|3213|-9254.17|-4767214.00|-3673210988859.949219|false|zync studies|irene xylophone|2035-02-27 13:54:08|2033-11-23 22:19:50.479124061|2002-01-24
+-116|26158|656187584|7013693841855774720|16408.52|-4052966.58|-1456248032425.478027|true|undecided|victor xylophone|2037-07-14 09:27:06|2026-05-22 13:48:09.35938113|2030-06-19
+-55|-20532|1473503196|3904|43475.58|2693198.77|-3131418514216.193359|false|values clariffication|holly davidson|2024-04-22 08:37:38|2016-09-22 02:17:02.628508820|
+48|8929|2013376408|2205|44946.09|3576291.75|-4871871797919.630859|false|zync studies|priscilla allen|2077-06-13 23:46:42|2069-04-25 15:46:10.268017072|1980-03-15
+-60|17242|-192181579|3739|23651.93|36466.81|-486686347287.950195||religion|luke allen|2039-04-13 23:05:11|2068-06-07 10:20:19.221041892|2068-08-15
+89|-11384|315055746|7747874976739016704|7523.22|1368814.97||false|biology|ethan white|2034-02-23 11:06:56|1970-01-01 00:00:00.906271939|2041-06-25
+51|18637|2048533360|8543177193114779648|-41617.02|482302.36|-1546046022526.041992|false|kindergarten||2037-01-09 15:06:40|2070-07-06 14:21:28.517162830|2008-08-25
+118|3253|1130043800|-9022154842129547264|27280.47|3522597.59|-4611990478400.116211|true|values clariffication|david davidson|2033-02-01 19:47:37|2027-11-17 10:28:47.80151123|2093-10-03
+-12|27693|768198315|-8205148279289085952|-10442.81|-1417623.57||true|forestry|oscar brown|2034-10-08 23:17:54|2067-09-04 22:14:04.110216000|2079-07-25
+-36||-2015780444|694|-32549.83|4035022.06|1218083012378.272461|false|education|calvin underhill|2028-09-24 17:15:42|2050-02-01 08:12:53.255232551|1978-03-08
+-5|25683||3031|12184.96|562852.37|-4282204832892.908691|true|values clariffication|fred quirinius|2058-05-17 17:05:29|2026-06-25 19:35:39.482657744|2012-01-05
+-112|31948|-1092872261|7731443941834678272|-27340.02|696583.58|445925390157.491211|false|american history||2068-08-30 08:33:56|2075-01-15 19:37:46.64329055|
+-105|-21693|-216495498|4030|47377.17|3344812.32|-3851850447729.248047|false|undecided|luke zipper|2065-02-05 00:07:25|2024-04-23 14:54:23.798337287|2063-03-09
+54|16799|548375173|-7242345057866285056|-47704.81|-3013070.91|3857259195148.898438|true||priscilla xylophone|2014-05-21 16:56:48|2021-05-18 01:51:52.384206743|2100-12-30
+125|6836|-749042352|-8570933074545745920|44652.34|3366329.49|-4008667949143.891602|false|education|katie young|2039-02-03 16:20:06|2031-07-25 04:22:41.813392590|2020-04-29
+55|-24194|2008211296|-8843859708698583040|7897.35|-4026884.90|694259931060.621094||chemistry||2079-01-13 19:25:05|2027-03-16 05:02:02.622892439|2033-03-31
+18|-909|-679230165|3763|22434.94|-2614784.49|-2408454599738.405273|true|geology|bob brown|2048-12-09 03:44:53|2033-03-12 15:31:38.900750512|2021-10-15
+76|5025|-917062754|8927691194719174656||-4080669.24|-1253821021704.375488|true|mathematics|ethan polk|2022-07-07 04:40:23|2056-10-27 13:55:56.356962022|2076-06-15
+100|-24296|-573787626|8760285623204290560|9875.85|4289038.66|3622015549775.347656|true|american history|priscilla nixon|2021-03-18 13:07:08|2064-05-08 20:34:48.987828340|1999-05-17
+11|-23836|-684022323|83|-22059.14|2308864.42|3872836019012.560547|true|linguistics|victor falkner|2064-07-27 19:39:15|2049-08-04 21:08:50.945667590|1991-07-13
+28|21402|615661052|7220581538170413056|-7646.80|-224504.56|-2027136687217.812012||yard duty|bob ellison|2074-07-29 17:39:45|2030-09-21 04:36:18.771239180|2028-10-10
+25|1691|-189393743|-8833019327569510400|11578.06|-3411141.32|-4211242415167.078125|true|history|irene ellison|2059-02-04 00:47:19|2024-11-12 02:53:19.930891036|2025-01-09
+-59|15655|-574475259|8991071342495531008||-1231801.32|-1391928752253.796875|false|kindergarten|wendy polk|2043-04-11 13:08:42|2046-09-15 19:25:38.31358640|2009-02-25
+-117|-10533|-1878572820|3421|13420.65|-3115003.44|4116438735684.261719|false|geology|ulysses underhill|2065-03-12 06:39:39|2025-10-05 18:30:25.745589201|2000-04-18
+-28|-3757|-785261879|-7292078334519894016|-26290.78|-4341379.78|-1215647046531.849121|true|||2017-09-18 17:33:59|1970-01-01 00:00:00.186021557|1984-03-11
+15|-30020|1421779455|7575087487730196480|-31975.46|-306860.42|-2589464657625.448730|true|values clariffication|luke young|2061-10-06 02:46:43|2016-11-17 15:18:46.950616093|1986-08-13
+-38|6451||2991|-8411.35|-4769995.72|2950208151142.704102|false|values clariffication|priscilla quirinius|2049-10-15 00:08:43|2062-03-09 00:47:42.827453320|2049-09-08
+|-11010|-1197602595|-9058029636530003968|-22529.51|1373462.79|-4532489497069.640625|false|religion|alice steinbeck|2078-12-16 08:18:15||2097-08-10
+3|6451|-1805915233|-9075486079396069376||2520514.18|-2617393499995.408691|false|biology|ulysses brown|2013-05-22 00:46:26|2035-09-30 04:40:58.480366014|2066-01-15
+-3|-32266|1153089364|1165|-12435.84|-4294699.57|-4048791701161.213867||forestry|bob falkner|2068-04-10 02:10:55|2072-12-27 15:53:19.908321394|2020-10-07
+-106|-12016|1668446119|8002769767000145920|27137.25|4374020.68|2385292009394.177734|true|study skills|calvin ellison|2043-04-23 18:29:09|2061-06-12 15:38:43.680018947|1984-07-06
+|24439|-897586947|-8545239748068941824|-40482.48|961076.47|1537506193057.574219|true|study skills|wendy van buren|2013-06-05 13:16:07|2033-09-20 07:09:33.786243824|2000-12-25
+-119|1356|882762933|7608447395949109248|-44435.29|-1597982.22|1819216028686.661133|false|quiet hour|holly white|2050-04-07 12:22:04|2053-08-02 04:11:58.530743146|2077-09-04
+102|21102|659343542|2649|36014.13|3634713.92|-575000889640.584961|true|wind surfing|irene thompson|2053-06-26 18:35:11|2055-01-18 04:20:49.647058421|2073-10-29
+-57|-17689|2064448036|8219326436390821888|-11132.51|545680.43|255313813106.958984|false|mathematics|alice steinbeck|2060-05-20 01:34:27|2020-03-01 11:02:51.570578441|2067-01-30
+55|14606|1650573576|1368|15589.12|2009602.58|1589957480687.509766|false|study skills|alice quirinius|2073-04-01 06:04:51|2068-11-11 13:58:15.920659825|2021-07-29
+49|-26061|1499399891|8836228556823977984|33827.48|2814066.54|2510449522761.426758|true|religion|ulysses carson|2037-12-20 09:55:27|2030-04-30 08:25:31.854317407|2098-06-02
+92|-11110|-932921363|-7399631791131074560|-12942.75|-217946.54|2761978539424.099609|false|xylophone band|alice xylophone|2066-01-16 10:28:44|2048-01-14 10:07:27.759822202|1976-03-14
+-96|16764||688|-13758.52||361086010470.934570|true|industrial engineering|alice young|2049-06-14 05:59:55|2057-09-19 00:55:44.140779413|1989-10-20
+95|4400|-849551464|7393308503950548992|6643.90|-4983437.35|-4892969527019.808594|false|study skills|oscar davidson|2017-04-02 00:57:37|2029-01-06 20:49:41.36841326|2032-06-30
+49|-5282|958866509|-7326863346317598720|-49014.75|1930239.92|3900277315209.912109|false|study skills|xavier white|2030-06-26 15:48:19|2014-10-11 06:03:21.748393551|1970-11-29
+-80|27981|693876030|3630|-33417.02|-485326.44||true|wind surfing|yuri king|2060-11-13 06:28:35|2023-03-20 21:57:46.54524894|1993-08-04
+-4|-31514|-1183469360|736|-36356.74|3032443.29|-2245612358023.905273|true|nap time|alice king|2076-01-06 22:44:17|2024-08-23 20:51:10.725383226|2054-04-28
+-55|12642|1052255272|4051|16488.02|-4342989.61|1502351615472.861328|true|joggying|fred carson|2034-08-27 14:47:03|2076-08-14 13:53:30.24220682|1994-01-05
+-121|3316|-1153978907|7164349895861829632|43069.96|-1477117.22|-553625504920.703125|false|wind surfing|fred robinson|2065-07-04 13:04:21|2022-05-22 02:00:34.914704680|2058-05-24
+-61|-4371|785382955|-9218875542187065344|-8677.04|4451156.34|815754938728.617188|true|nap time|gabriella garcia|2040-07-17 22:55:56|2079-09-11 14:30:55.332790297|1975-02-11
+52|28360|-1989778424|-7687052294777208832|47634.41|-2314022.54|2507745331167.754883|false|xylophone band|luke falkner|2062-11-19 05:42:37|2020-09-22 17:22:25.133299538|2062-08-05
+-90|-12103|712625264|-7739424919198187520|-47667.77||427069368079.067383|true|linguistics|jessica brown|2051-04-07 00:21:48|2032-08-30 01:34:32.658298299|1994-03-17
+3|-1510|-1718163874|3682|35630.13|-3338857.45|-899893327656.009277|false|industrial engineering|priscilla white|2055-01-04 20:17:55|2077-03-07 22:59:33.441695704|2017-10-21
+42|10699|1372705672|8652485812846567424|9896.00|-3472573.25|-1087033141560.276367|false|nap time|victor thompson|2052-02-27 06:33:17|2065-01-20 19:20:28.802291910|2068-06-03
+|-24968|340929437|2285|-29910.91|-516691.22|2120806635551.702148|true|forestry|quinn brown|2072-08-31 12:44:15|2062-09-22 01:14:51.597985350|2065-10-09
+74|343|524317972|-7038455462786334720|-39904.28|2443665.68|-2295535553408.214355|true|values clariffication|ulysses miller|2066-10-07 09:47:01|2050-01-23 01:49:18.644001562|2051-05-04
+38|2085|1256676429|3067|268.34|568804.73||false|xylophone band|mike ovid|2038-12-04 21:44:33|2055-11-26 23:26:28.146636400|2042-06-25
+79|-23663|-1218592418|8644602243484803072|44403.78|3361937.88|-1489866826314.631836||chemistry|gabriella ovid|2035-04-27 15:14:36|2064-02-20 19:43:32.403661267|2012-02-17
+-92|4203|-1429346144|2270|21727.68|-421624.54|-3983022978825.019531|false|education|zach miller|2040-09-30 18:36:39|2017-11-20 20:46:21.657733108|2068-02-18 +48|-24884|1258721737|355|46929.58|1781934.96|3039600013709.673828|true|industrial engineering|jessica nixon|2071-05-01 19:05:12|2074-03-02 10:39:55.93641205|2016-07-15 +64|22618|254921167|9091085792947666944|-44704.24|711467.05|4658230495944.267578|true|chemistry|priscilla allen|2039-11-07 17:48:04||2060-12-11 +61|2677|253621570|-8411282676082565120||3151363.12|406664869016.423828|false|history|calvin carson|2015-05-24 21:22:45|2028-02-08 22:19:58.374126984|2090-08-05 +|-15862|656636097|-7535958203887706112|-17440.17|-2157150.34|-1827771009404.876953|false|xylophone band|luke carson|2041-09-06 16:13:03|2050-08-28 23:06:13.113935291|2083-07-13 +-33|32260|-534894953|1075|-356.94|-4324062.25|-890245194815.513184|true|yard duty|xavier ovid|2053-10-03 22:43:56|2079-07-14 01:00:00.460817725|2001-09-04 +-115||-2087815643|-8892963883085578240|34523.24|-3158421.12|-2845384798090.435547|false|chemistry|fred young|2016-05-12 06:45:13|2035-08-10 09:59:03.386985729|2005-03-02 +103|24663|-434747475|361|30729.61|-4264710.63|922131932518.013672|true|nap time|jessica underhill|2050-02-07 06:55:52|2067-11-14 22:33:08.493306478|1996-08-26 +56|24768|1202593021|7418271723644403712|-10868.07|2571256.58|2999497077887.412109|false|forestry|rachel carson|2049-03-10 07:25:18|2039-06-18 00:32:26.677441797|2058-04-19 +119|32671|-973128166|3054|-26979.86|-628111.85|-1680604442200.816895|true|topology|tom garcia|2030-05-20 09:35:25|2063-08-06 04:51:16.396723749|2004-11-17 +-31|-739|-982238309|939|-42173.51|3273743.27|4303282073846.900391|false|nap time|calvin polk|2013-10-04 10:43:21|2067-06-29 23:59:23.279014356| +-46|31502|139661585|8210813831744118784|21785.35|-268730.23|-1288193922702.625000|false|mathematics|priscilla quirinius||2043-03-23 17:03:08.504117612|2010-12-13 +103|-29646|-737624128|780|10202.27|-4999829.07||||victor falkner|2020-03-30 03:39:12|2038-03-24 14:36:17.459773339|2022-04-23 +-81|-29442|-1738775004|6996686091335884800|39542.16|138346.52|4321613687397.097656|true|values clariffication|mike hernandez|2045-09-03 21:41:39|2078-07-04 13:56:17.968990910|1974-11-30 +-34||65172363|3958|32525.84|2402710.27|-2898525744154.696289|true|linguistics|sarah ovid|2019-01-05 16:50:23|2017-03-12 07:24:44.150435697|2060-05-02 +38|31411|1893512909|-8244116388227104768|-16276.43|-4046624.13|4304561297863.359375|true|education|ethan ichabod|2036-06-21 17:40:38|2064-01-21 14:23:17.231854149|2083-05-22 +-70|-22726|1519993904|-8270479187688816640|13105.96|555119.13|-398063341865.580078|true|xylophone band|gabriella ellison|2030-08-30 02:23:04|2049-08-11 05:12:04.148231477|2013-04-10 +40|-302|-1955647385|3961|41353.46||-3450402956270.970703|true|study skills|irene steinbeck|2016-11-17 17:54:37|2062-11-14 17:05:46.987404583|2078-07-31 +107|-30534|-1043413503|3810|21433.28|1928485.12|4994550248722.298828|true|religion|mike allen|2062-04-28 19:08:00|2075-03-04 17:23:03.73018585|1993-06-15 +77||955171928|-6934304742087655424|34995.02|3971989.53|-475678432575.605469|false|opthamology||2068-03-16 07:25:04|2059-08-26 22:16:53.454090841|1995-10-15 +41|15417|335359004|2608|-13412.84|-3822500.97|1743451461698.625977|true|topology|bob ichabod|2076-02-06 03:08:08|2078-11-06 10:58:05.463087868|2041-06-18 +-78|25683|1362740312|-9157613004431998976|36846.57|-843617.87|3565868200700.777344|true|undecided|irene underhill|2040-12-26 23:08:17|2052-01-02 20:22:40.487097346|2031-08-17 
+-73||-839176151|-8916987977485312000|-7674.72||-825771731801.268066||linguistics|bob white|2071-07-29 21:38:49|2014-06-27 11:10:51.195137809|2013-06-11 +-86||-7929246|3755|22920.73|3620278.52||true|biology|calvin thompson|2028-02-01 20:43:30|2056-11-04 04:25:34.640015080|1987-04-04 +120|-18933|-1063673827|5|14086.87|-980970.29|3047030016524.807617|false|religion|irene miller|2042-12-14 22:54:02|2017-12-27 18:22:06.809083462|1984-10-13 +36|-10972|990246086|1158|-16161.90|-66861.16|-1214938404709.578125|false|industrial engineering|holly allen|2064-01-13 04:12:41|2042-02-06 22:03:25.824550748|2016-12-06 +70|-32498|2013444562|66|22183.31|395235.40|-1179173001517.490723||nap time|ulysses white|2028-10-16 12:12:48|2080-06-21 03:11:31.104371855|2068-01-29 +17|-6502|-1702587308|7778936842502275072|-18436.78||-196657000634.594727|true|xylophone band||2046-01-31 05:17:31|2058-03-03 00:00:49.488336089|1973-09-16 +-95|17489|1137950964|-7470307155642245120|22234.00|-1300453.03|822293548806.492188|true|chemistry||2056-12-28 03:10:05|2026-03-02 01:50:43.137583417|2059-11-02 +-20|-4393|854230650||-21043.42|-572576.92|2869426367294.245117|true|zync studies|ethan laertes|2072-01-17 22:15:36|2030-04-10 07:32:47.194501657|1970-06-16 +101|-15578|-1838281337|7166263463731421184||4540045.88|442360758276.833984|true|kindergarten|ulysses carson|2024-03-16 20:04:25|2053-12-11 14:11:31.503793969|1992-05-13 +-9|-21117|1228837108|-8696162322976997376|-8618.55|1352592.61|2431582392595.643555|false|religion|fred davidson|2028-04-28 02:29:15|2039-04-15 10:54:59.268945168|2014-02-16 +-115|19635|37461818||2559.39|-4447382.23|-2197895735917.754395|false|undecided|jessica davidson|2062-07-15 09:24:13|2021-10-13 15:51:41.536144896| +39|-12904|-1333770335|7922443154272395264|28530.91|4004643.53|-4769680038655.472656|false|american history|david miller|2021-10-12 22:52:55||2023-03-20 +89||-337586880|3409|-45141.21|4716026.64|-4809051982087.346680|false|education|gabriella steinbeck|2076-10-13 16:12:50|2038-02-10 15:55:46.450748066|2073-03-18 +59|-24336|-434656160|7432998950057975808|48914.73|-4245153.17|-1477613811579.736816|true|education|quinn white|2019-05-21 17:55:59|2057-12-20 10:40:00.412591715|1985-06-13 +2|24244||-7668388017287020544|38976.85|-4098435.05|746390817981.960938|false|study skills|jessica miller|2021-11-22 00:30:17|2069-09-19 03:31:53.518229478|1990-01-04 +|-22163|-1104268719|1678||2494279.70||true|opthamology|ulysses brown|2018-05-17 19:02:30|2023-06-07 17:50:49.214370663|2061-01-24 +-57|11814|88774647|-7949309059286163456|-27993.19|-2150345.09|-3250755733061.261230|false|kindergarten|priscilla miller|2056-01-01 10:54:32||2105-03-09 +-127|14180|1969239701|1270|44944.40|467185.02|270595139779.458984|true|biology|yuri nixon|2017-10-26 04:06:34|2038-07-28 08:46:47.990750284|2085-09-24 +44|12014|-1716506227|3462|-15942.62|2202747.77|3306174177127.374023|true|wind surfing|david garcia|2017-01-10 09:58:30|2061-08-24 19:56:11.80420459| +-98|-18646|584084934|3089|49639.10||-4997414117561.546875|true|forestry|tom van buren|2040-07-28 08:47:33|2029-09-05 18:34:33.984926242|2013-09-12
\ No newline at end of file
diff --git a/data/files/web_sales_2k b/data/files/web_sales_2k
new file mode 100644
index 0000000..a1cbb21
--- /dev/null
+++ b/data/files/web_sales_2k
@@ -0,0 +1,2000 @@
+2451383|73313|2451482|4591|83074|596485|1096|40907|85919|41329|1140|1351|43|4|4|5|6|1|57|33.59|59.45|38.04|1220.37|2168.28|1914.63|3388.65|50.95|1149.18|575.70|1019.10|1070.05|1594.80|1645.75|-895.53|
+2451383|73313|2451411|3566|83074|596485|1096|40907|85919|41329|1140|1351|28|7|3|2|271|1|38|29.83|48.92|26.41|855.38|1003.58|1133.54|1858.96|30.10|0.00|910.86|1003.58|1033.68|1914.44|1944.54|-129.96| +2451383|73313|2451413|7286|83074|596485|1096|40907|85919|41329|1140|1351|58|28|10|5|300|1|32|49.72|107.89|97.10|345.28|3107.20|1591.04|3452.48|124.28|0.00|828.48|3107.20|3231.48|3935.68|4059.96|1516.16| +2451383|73313|2451393|2755|83074|596485|1096|40907|85919|41329|1140|1351|2|7|6|1|63|1|65|69.18|112.07|22.41|5827.90|1456.65|4496.70|7284.55|29.13|0.00|2476.50|1456.65|1485.78|3933.15|3962.28|-3040.05| +2451383|73313|2451502|2516|83074|596485|1096|40907|85919|41329|1140|1351|56|16|2|5|18|1|58|36.62|41.38|16.13|1464.50|935.54|2123.96|2400.04|84.19|0.00|167.62|935.54|1019.73|1103.16|1187.35|-1188.42| +2451383|73313|2451421|16966|83074|596485|1096|40907|85919|41329|1140|1351|50|7|2|2|185|1|90|72.05|161.39|27.43|12056.40|2468.70|6484.50|14525.10|74.06|0.00|4647.60|2468.70|2542.76|7116.30|7190.36|-4015.80| +2451383|73313|2451457|10402|83074|596485|1096|40907|85919|41329|1140|1351|56|16|16|3|293|1|15|83.92|174.55|69.82|1570.95|1047.30|1258.80|2618.25|20.94|0.00|392.70|1047.30|1068.24|1440.00|1460.94|-211.50| +2451383|73313|2451430|1735|83074|596485|1096|40907|85919|41329|1140|1351|25|19|14|3|202|1|16|45.27|128.56|68.13|966.88|1090.08|724.32|2056.96|87.20|0.00|370.24|1090.08|1177.28|1460.32|1547.52|365.76| +2451383|73313|2451458|15464|83074|596485|1096|40907|85919|41329|1140|1351|13|26|8|5|49|1|54|53.45|60.39|26.57|1826.28|1434.78|2886.30|3261.06|48.78|215.21|488.70|1219.57|1268.35|1708.27|1757.05|-1666.73| +2452625|34964|2452702|8925|42296|436090|2684|37278|57428|1487225|3907|30901|9|13|16|3|267|2|47|49.64|68.99|12.41|2659.26|583.27|2333.08|3242.53|52.49|0.00|1134.58|583.27|635.76|1717.85|1770.34|-1749.81| +2452625|34964|2452638|11041|42296|436090|2684|37278|57428|1487225|3907|30901|1|13|16|2|221|2|12|7.24|12.16|11.91|3.00|142.92|86.88|145.92|10.00|0.00|29.16|142.92|152.92|172.08|182.08|56.04| +2452625|34964|2452645|645|42296|436090|2684|37278|57428|1487225|3907|30901|15|21|7|5|246|2|6|45.21|67.36|30.31|222.30|181.86|271.26|404.16|16.36|0.00|185.88|181.86|198.22|367.74|384.10|-89.40| +2452625|34964|2452690|12453|42296|436090|2684|37278|57428|1487225|3907|30901|57|12|3|5|237|2|43|9.62|13.08|9.02|174.58|387.86|413.66|562.44|19.39|0.00|117.82|387.86|407.25|505.68|525.07|-25.80| +2452625|34964|2452707|13831|42296|436090|2684|37278|57428|1487225|3907|30901|18|19|1|2|223|2|57|35.95|62.55|43.78|1069.89|2495.46|2049.15|3565.35|224.59|0.00|962.16|2495.46|2720.05|3457.62|3682.21|446.31| +2452625|34964|2452640|9559|42296|436090|2684|37278|57428|1487225|3907|30901|6|21|6|4|72|2|93|65.80|152.65|29.00|11499.45|2697.00|6119.40|14196.45|215.76|0.00|7097.76|2697.00|2912.76|9794.76|10010.52|-3422.40| +2452625|34964|2452634|8085|42296|436090|2684|37278|57428|1487225|3907|30901|45|3|18|2|276|2|93|55.42|127.46|1.27|11735.67|118.11|5154.06|11853.78|4.72|0.00|1421.97|118.11|122.83|1540.08|1544.80|-5035.95| +2452625|34964|2452734|12081|42296|436090|2684|37278|57428|1487225|3907|30901|9|27|3|2|94|2|82|48.65|91.94|8.27|6860.94|678.14|3989.30|7539.08|13.56|0.00|2788.82|678.14|691.70|3466.96|3480.52|-3311.16| +2452625|34964|2452739|15739|42296|436090|2684|37278|57428|1487225|3907|30901|24|3|9|4|99|2|33|80.23|90.65|55.29|1166.88|1824.57|2647.59|2991.45|0.00|0.00|478.50|1824.57|1824.57|2303.07|2303.07|-823.02| 
+2451754|1529|2451801|17431|74800|1219525|3450|3375|84180|293885|5550|41475|49|8|2|2|259|3|65|5.37|10.31|2.06|536.25|133.90|349.05|670.15|7.47|9.37|26.65|124.53|132.00|151.18|158.65|-224.52| +2451754|1529|2451791|4694|74800|1219525|3450|3375|84180|293885|5550|41475|13|25|9|5|91|3|74|74.66|83.61|52.67|2289.56|3897.58|5524.84|6187.14|116.92|0.00|1546.60|3897.58|4014.50|5444.18|5561.10|-1627.26| +2451754|1529|2451814|2189|74800|1219525|3450|3375|84180|293885|5550|41475|8|26|3|4|18|3|26|77.15|198.27|109.04|2319.98|2835.04|2005.90|5155.02|36.28|2381.43|206.18|453.61|489.89|659.79|696.07|-1552.29| +2451754|1529|2451765|151|74800|1219525|3450|3375|84180|293885|5550|41475|23|7|3|5|167|3|25|99.00|292.05|262.84|730.25|6571.00|2475.00|7301.25|65.71|0.00|3285.50|6571.00|6636.71|9856.50|9922.21|4096.00| +2451754|1529|2451855|10793|74800|1219525|3450|3375|84180|293885|5550|41475|11|14|7|4|298|3|80|14.55|42.34|27.09|1220.00|2167.20|1164.00|3387.20|43.34|0.00|609.60|2167.20|2210.54|2776.80|2820.14|1003.20| +2451754|1529|2451862|12782|74800|1219525|3450|3375|84180|293885|5550|41475|20|2|13|4|275|3|54|64.58|160.80|120.60|2170.80|6512.40|3487.32|8683.20|586.11|0.00|3646.62|6512.40|7098.51|10159.02|10745.13|3025.08| +2451754|1529|2451809|15769|74800|1219525|3450|3375|84180|293885|5550|41475|35|17|20|1|162|3|30|54.81|81.11|22.71|1752.00|681.30|1644.30|2433.30|6.81|0.00|0.00|681.30|688.11|681.30|688.11|-963.00| +2451754|1529|2451782|5549|74800|1219525|3450|3375|84180|293885|5550|41475|29|26|6|3|170|3|19|87.13|170.77|155.40|292.03|2952.60|1655.47|3244.63|88.57|0.00|486.59|2952.60|3041.17|3439.19|3527.76|1297.13| +2451754|1529|2451760|12475|74800|1219525|3450|3375|84180|293885|5550|41475|44|11|16|4|232|3|29|86.36|90.67|68.90|631.33|1998.10|2504.44|2629.43|159.84|0.00|236.64|1998.10|2157.94|2234.74|2394.58|-506.34| +2451754|1529|2451812|5324|74800|1219525|3450|3375|84180|293885|5550|41475|26|26|4|4|277|3|2|4.21|10.90|0.54|20.72|1.08|8.42|21.80|0.07|0.00|5.88|1.08|1.15|6.96|7.03|-7.34| +2451754|1529|2451846|15686|74800|1219525|3450|3375|84180|293885|5550|41475|53|11|18|2|79|3|42|56.94|107.04|102.75|180.18|4315.50|2391.48|4495.68|43.15|0.00|1708.14|4315.50|4358.65|6023.64|6066.79|1924.02| +2451754|1529|2451858|12977|74800|1219525|3450|3375|84180|293885|5550|41475|26|14|12|1|50|3|91|88.66|242.04|183.95|5286.19|16739.45|8068.06|22025.64|502.18|0.00|0.00|16739.45|17241.63|16739.45|17241.63|8671.39| +2451754|1529|2451758|1874|74800|1219525|3450|3375|84180|293885|5550|41475|43|29|15|3|98|3|10|32.29|45.85|30.71|151.40|307.10|322.90|458.50|0.61|276.39|137.50|30.71|31.32|168.21|168.82|-292.19| +2451754|1529|2451801|8281|74800|1219525|3450|3375|84180|293885|5550|41475|26|1|18|5|59|3|1|66.29|174.34|137.72|36.62|137.72|66.29|174.34|4.62|71.61|45.32|66.11|70.73|111.43|116.05|-0.18| +2451754|1529|2451763|17867|74800|1219525|3450|3375|84180|293885|5550|41475|7|5|19|5|14|3|7|66.22|145.68|144.22|10.22|1009.54|463.54|1019.76|80.76|0.00|193.69|1009.54|1090.30|1203.23|1283.99|546.00| +2451754|1529|2451807|10760|74800|1219525|3450|3375|84180|293885|5550|41475|47|25|2|3|17|3|10|87.53|150.55|58.71|918.40|587.10|875.30|1505.50|27.12|135.03|316.10|452.07|479.19|768.17|795.29|-423.23| +2451483|77832|2451501|11842|89187|512359|4331|19456|87757|1249940|2525|37697|38|13|11|3|192|4|83|4.36|10.15|4.66|455.67|386.78|361.88|842.45|27.07|0.00|151.06|386.78|413.85|537.84|564.91|24.90| 
+2451483|77832|2451554|6718|89187|512359|4331|19456|87757|1249940|2525|37697|1|10|4|4|74|4|20|71.90|103.53|85.92|352.20|1718.40|1438.00|2070.60|154.65|0.00|227.60|1718.40|1873.05|1946.00|2100.65|280.40| +2451483|77832|2451542|3592|89187|512359|4331|19456|87757|1249940|2525|37697|38|20|12|5|201|4|26|81.47|126.27|27.77|2561.00|722.02|2118.22|3283.02|50.54|0.00|1411.54|722.02|772.56|2133.56|2184.10|-1396.20| +2451483|77832|2451563|14779|89187|512359|4331|19456|87757|1249940|2525|37697|16|22|12|2|125|4|32|30.81|40.05|22.02|576.96|704.64|985.92|1281.60|35.23|0.00|358.72|704.64|739.87|1063.36|1098.59|-281.28| +2451483|77832|2451593|5908|89187|512359|4331|19456|87757|1249940|2525|37697|55|8|2|1|93|4|44|68.96|143.43|61.67|3597.44|2713.48|3034.24|6310.92|135.67|0.00|0.00|2713.48|2849.15|2713.48|2849.15|-320.76| +2451483|77832|2451487|7636|89187|512359|4331|19456|87757|1249940|2525|37697|26|4|12|1|30|4|41|6.21|11.92|1.31|435.01|53.71|254.61|488.72|3.75|0.00|190.24|53.71|57.46|243.95|247.70|-200.90| +2451483|77832|2451487|520|89187|512359|4331|19456|87757|1249940|2525|37697|37|2|2|5|265|4|22|71.63|113.89|69.47|977.24|1528.34|1575.86|2505.58|137.55|0.00|1077.34|1528.34|1665.89|2605.68|2743.23|-47.52| +2451483|77832|2451569|12574|89187|512359|4331|19456|87757|1249940|2525|37697|13|16|11|5|202|4|51|43.92|94.86|52.17|2177.19|2660.67|2239.92|4837.86|102.17|106.42|1209.21|2554.25|2656.42|3763.46|3865.63|314.33| +2451483|77832|2451536|997|89187|512359|4331|19456|87757|1249940|2525|37697|20|26|20|3|159|4|50|15.41|17.41|6.26|557.50|313.00|770.50|870.50|9.20|159.63|330.50|153.37|162.57|483.87|493.07|-617.13| +2451483|77832|2451521|13090|89187|512359|4331|19456|87757|1249940|2525|37697|19|25|6|2|105|4|65|34.29|75.43|18.10|3726.45|1176.50|2228.85|4902.95|70.59|0.00|1176.50|1176.50|1247.09|2353.00|2423.59|-1052.35| +2451483|77832|2451518|1600|89187|512359|4331|19456|87757|1249940|2525|37697|46|13|3|1|249|4|37|35.86|89.29|0.00|3303.73|0.00|1326.82|3303.73|0.00|0.00|1090.02|0.00|0.00|1090.02|1090.02|-1326.82| +2451945|25492|2451977|16217|66395|1553493|1391|32089|22505|1123302|983|19681|31|1|6|1|166|5|57|69.75|192.51|102.03|5157.36|5815.71|3975.75|10973.07|174.47|0.00|2633.40|5815.71|5990.18|8449.11|8623.58|1839.96| +2451945|25492|2451960|14577|66395|1553493|1391|32089|22505|1123302|983|19681|55|17|20|1|252|5|50|42.11|58.95|46.57|619.00|2328.50|2105.50|2947.50|162.99|0.00|58.50|2328.50|2491.49|2387.00|2549.99|223.00| +2451945|25492|2452011|5559|66395|1553493|1391|32089|22505|1123302|983|19681|29|5|1|2|230|5|89|70.02|77.72|55.18|2006.06|4911.02|6231.78|6917.08|147.33|0.00|2766.12|4911.02|5058.35|7677.14|7824.47|-1320.76| +2451945|25492|2451956|8633|66395|1553493|1391|32089|22505|1123302|983|19681|21|9|19|3|220|5|69|21.00|24.36|13.64|739.68|941.16|1449.00|1680.84|9.41|0.00|487.14|941.16|950.57|1428.30|1437.71|-507.84| +2451945|25492|2451954|14617|66395|1553493|1391|32089|22505|1123302|983|19681|29|27|16|5|34|5|26|26.29|28.65|18.33|268.32|476.58|683.54|744.90|33.36|0.00|186.16|476.58|509.94|662.74|696.10|-206.96| +2451945|25492|2451949|11747|66395|1553493|1391|32089|22505|1123302|983|19681|51|1|9|4|62|5|94|66.79|165.63|66.25|9341.72|6227.50|6278.26|15569.22|560.47|0.00|2491.00|6227.50|6787.97|8718.50|9278.97|-50.76| +2451945|25492|2452018|7243|66395|1553493|1391|32089|22505|1123302|983|19681|29|21|14|5|133|5|37|16.03|20.67|0.62|741.85|22.94|593.11|764.79|0.00|0.00|367.04|22.94|22.94|389.98|389.98|-570.17| 
+2451945|25492|2452044|875|66395|1553493|1391|32089|22505|1123302|983|19681|35|17|10|1|35|5|51|10.48|30.18|23.84|323.34|1215.84|534.48|1539.18|23.58|36.47|122.91|1179.37|1202.95|1302.28|1325.86|644.89| +2451945|25492|2452046|6215|66395|1553493|1391|32089|22505|1123302|983|19681|57|19|8|4|170|5|45|67.42|113.26|41.90|3211.20|1885.50|3033.90|5096.70|56.56|0.00|2497.05|1885.50|1942.06|4382.55|4439.11|-1148.40| +2451945|25492|2452060|10215|66395|1553493|1391|32089|22505|1123302|983|19681|23|7|6|3|10|5|3|47.96|84.88|1.69|249.57|5.07|143.88|254.64|0.20|0.00|30.54|5.07|5.27|35.61|35.81|-138.81| +2451945|25492|2452065|7665|66395|1553493|1391|32089|22505|1123302|983|19681|49|13|2|4|36|5|71|68.25|152.19|4.56|10481.73|323.76|4845.75|10805.49|29.13|0.00|648.23|323.76|352.89|971.99|1001.12|-4521.99| +2451945|25492|2451997|9961|66395|1553493|1391|32089|22505|1123302|983|19681|59|3|7|2|54|5|57|25.26|42.18|2.53|2260.05|144.21|1439.82|2404.26|8.65|0.00|624.72|144.21|152.86|768.93|777.58|-1295.61| +2451945|25492|2451977|14731|66395|1553493|1391|32089|22505|1123302|983|19681|3|27|16|3|110|5|66|66.72|166.80|90.07|5064.18|5944.62|4403.52|11008.80|416.12|0.00|1651.32|5944.62|6360.74|7595.94|8012.06|1541.10| +2451945|25492|2452005|3175|66395|1553493|1391|32089|22505|1123302|983|19681|43|17|9|1|16|5|60|30.82|56.70|29.48|1633.20|1768.80|1849.20|3402.00|0.00|1715.73|475.80|53.07|53.07|528.87|528.87|-1796.13| +2451945|25492|2452002|6363|66395|1553493|1391|32089|22505|1123302|983|19681|3|21|7|1|112|5|13|19.64|34.17|17.76|213.33|230.88|255.32|444.21|6.92|0.00|75.40|230.88|237.80|306.28|313.20|-24.44| +2451945|25492|2451998|3273|66395|1553493|1391|32089|22505|1123302|983|19681|15|17|7|3|259|5|63|9.36|19.74|16.38|211.68|1031.94|589.68|1243.62|35.39|526.28|522.27|505.66|541.05|1027.93|1063.32|-84.02| +2451534|69300|2451633|4879|89701|202409|1164|31472|63718|215362|6843|25951|43|28|1|4|17|6|12|56.04|96.38|89.63|81.00|1075.56|672.48|1156.56|96.80|0.00|69.36|1075.56|1172.36|1144.92|1241.72|403.08| +2451534|69300|2451644|16009|89701|202409|1164|31472|63718|215362|6843|25951|16|25|10|4|18|6|73|36.58|41.70|35.44|456.98|2587.12|2670.34|3044.10|77.61|0.00|1247.57|2587.12|2664.73|3834.69|3912.30|-83.22| +2451534|69300|2451610|7712|89701|202409|1164|31472|63718|215362|6843|25951|58|28|10|5|277|6|91|48.63|65.16|3.25|5633.81|295.75|4425.33|5929.56|2.95|0.00|2489.76|295.75|298.70|2785.51|2788.46|-4129.58| +2451534|69300|2451651|11413|89701|202409|1164|31472|63718|215362|6843|25951|26|10|8|5|42|6|51|42.52|121.60|38.91|4217.19|1984.41|2168.52|6201.60|0.00|754.07|557.94|1230.34|1230.34|1788.28|1788.28|-938.18| +2451534|69300|2451639|14210|89701|202409|1164|31472|63718|215362|6843|25951|8|25|1|4|217|6|1|99.76|206.50|0.00|206.50|0.00|99.76|206.50|0.00|0.00|33.04|0.00|0.00|33.04|33.04|-99.76| +2451534|69300|2451583|6529|89701|202409|1164|31472|63718|215362|6843|25951|52|1|5|1|230|6|83|39.67|47.60|34.74|1067.38|2883.42|3292.61|3950.80|259.50|0.00|1026.71|2883.42|3142.92|3910.13|4169.63|-409.19| +2451534|69300|2451576|13015|89701|202409|1164|31472|63718|215362|6843|25951|37|13|5|3|91|6|63|97.02|121.27|103.07|1146.60|6493.41|6112.26|7640.01|12.98|6233.67|2597.49|259.74|272.72|2857.23|2870.21|-5852.52| +2451534|69300|2451604|15250|89701|202409|1164|31472|63718|215362|6843|25951|38|10|2|3|114|6|11|16.62|23.76|2.61|232.65|28.71|182.82|261.36|2.29|0.00|73.15|28.71|31.00|101.86|104.15|-154.11| 
+2451178|76807|2451181|12710|48271|1161066|3908|32303|96091|233980|782|16777|2|1|10|2|201|7|47|92.35|229.02|114.51|5381.97|5381.97|4340.45|10763.94|53.81|0.00|4843.35|5381.97|5435.78|10225.32|10279.13|1041.52| +2451178|76807|2451279|7555|48271|1161066|3908|32303|96091|233980|782|16777|1|16|4|2|100|7|44|52.52|126.04|122.25|166.76|5379.00|2310.88|5545.76|430.32|0.00|1219.68|5379.00|5809.32|6598.68|7029.00|3068.12| +2451178|76807|2451240|11416|48271|1161066|3908|32303|96091|233980|782|16777|10|2|15|1|27|7|87|4.47|4.82|3.85|84.39|334.95|388.89|419.34|30.14|0.00|108.75|334.95|365.09|443.70|473.84|-53.94| +2451178|76807|2451210|4856|48271|1161066|3908|32303|96091|233980|782|16777|16|10|3|5|267|7|9|70.18|106.67|44.80|556.83|403.20|631.62|960.03|0.00|249.98|470.34|153.22|153.22|623.56|623.56|-478.40| +2451178|76807|2451201|13342|48271|1161066|3908|32303|96091|233980|782|16777|49|1|20|1|95|7|13|49.14|63.88|31.94|415.22|415.22|638.82|830.44|6.56|87.19|215.80|328.03|334.59|543.83|550.39|-310.79| +2451178|76807|2451287|6628|48271|1161066|3908|32303|96091|233980|782|16777|16|20|6|3|250|7|52|51.52|79.85|59.08|1080.04|3072.16|2679.04|4152.20|0.00|0.00|2034.24|3072.16|3072.16|5106.40|5106.40|393.12| +2451178|76807|2451268|15808|48271|1161066|3908|32303|96091|233980|782|16777|1|8|19|5|24|7|1|64.29|145.93|135.71|10.22|135.71|64.29|145.93|0.00|0.00|72.96|135.71|135.71|208.67|208.67|71.42| +2451178|76807|2451243|1225|48271|1161066|3908|32303|96091|233980|782|16777|2|1|20|1|35|7|44|94.24|201.67|12.10|8341.08|532.40|4146.56|8873.48|21.29|0.00|4170.32|532.40|553.69|4702.72|4724.01|-3614.16| +2452190|51596|2452220|6047|77108|1572482|5456|22830|53825|1895897|5705|33479|41|29|19|3|244|8|76|55.63|72.31|54.23|1374.08|4121.48|4227.88|5495.56|82.42|0.00|164.16|4121.48|4203.90|4285.64|4368.06|-106.40| +2452190|51596|2452307|4603|77108|1572482|5456|22830|53825|1895897|5705|33479|49|21|20|1|15|8|33|31.08|66.20|35.74|1005.18|1179.42|1025.64|2184.60|106.14|0.00|633.27|1179.42|1285.56|1812.69|1918.83|153.78| +2452190|51596|2452216|7563|77108|1572482|5456|22830|53825|1895897|5705|33479|17|19|8|1|244|8|55|45.57|83.84|66.23|968.55|3642.65|2506.35|4611.20|218.55|0.00|1383.25|3642.65|3861.20|5025.90|5244.45|1136.30| +2452190|51596|2452213|9823|77108|1572482|5456|22830|53825|1895897|5705|33479|41|17|14|2|190|8|70|34.71|71.50|50.05|1501.50|3503.50|2429.70|5005.00|0.00|0.00|2051.70|3503.50|3503.50|5555.20|5555.20|1073.80| +2452190|51596|2452242|2247|77108|1572482|5456|22830|53825|1895897|5705|33479|7|13|17|5|60|8|10|28.39|67.00|7.37|596.30|73.70|283.90|670.00|1.47|0.00|180.90|73.70|75.17|254.60|256.07|-210.20| +2452190|51596|2452221|13143|77108|1572482|5456|22830|53825|1895897|5705|33479|59|21|2|3|157|8|87|88.69|201.32|30.19|14888.31|2626.53|7716.03|17514.84|0.00|0.00|1050.09|2626.53|2626.53|3676.62|3676.62|-5089.50| +2452190|51596|2452276|6545|77108|1572482|5456|22830|53825|1895897|5705|33479|25|15|13|3|173|8|22|46.33|81.54|44.84|807.40|986.48|1019.26|1793.88|49.32|0.00|807.18|986.48|1035.80|1793.66|1842.98|-32.78| +2452190|51596|2452305|12675|77108|1572482|5456|22830|53825|1895897|5705|33479|21|3|13|4|148|8|96|79.34|204.69|83.92|11593.92|8056.32|7616.64|19650.24|644.50|0.00|392.64|8056.32|8700.82|8448.96|9093.46|439.68| +2452190|51596|2452267|14961|77108|1572482|5456|22830|53825|1895897|5705|33479|53|19|13|1|169|8|91|44.37|108.70|61.95|4254.25|5637.45|4037.67|9891.70|56.37|0.00|197.47|5637.45|5693.82|5834.92|5891.29|1599.78| 
+2452190|51596|2452309|9843|77108|1572482|5456|22830|53825|1895897|5705|33479|1|17|4|2|197|8|44|70.93|75.18|10.52|2845.04|462.88|3120.92|3307.92|36.66|4.62|1223.64|458.26|494.92|1681.90|1718.56|-2662.66| +2452190|51596|2452211|2351|77108|1572482|5456|22830|53825|1895897|5705|33479|7|11|18|5|42|8|33|54.70|140.03|137.22|92.73|4528.26|1805.10|4620.99|90.56|0.00|2079.33|4528.26|4618.82|6607.59|6698.15|2723.16| +2452190|51596|2452243|9519|77108|1572482|5456|22830|53825|1895897|5705|33479|33|29|3|1|286|8|59|25.18|37.01|0.37|2161.76|21.83|1485.62|2183.59|0.74|3.27|567.58|18.56|19.30|586.14|586.88|-1467.06| +2452190|51596|2452305|8767|77108|1572482|5456|22830|53825|1895897|5705|33479|17|23|5|3|28|8|64|5.57|7.07|1.83|335.36|117.12|356.48|452.48|0.82|105.40|153.60|11.72|12.54|165.32|166.14|-344.76| +2452190|51596|2452192|7747|77108|1572482|5456|22830|53825|1895897|5705|33479|9|5|11|4|207|8|44|58.29|112.49|22.49|3960.00|989.56|2564.76|4949.56|89.06|0.00|247.28|989.56|1078.62|1236.84|1325.90|-1575.20| +2452190|51596|2452242|5899|77108|1572482|5456|22830|53825|1895897|5705|33479|51|9|17|4|108|8|93|18.56|18.93|3.97|1391.28|369.21|1726.08|1760.49|0.00|0.00|704.01|369.21|369.21|1073.22|1073.22|-1356.87| +2450900|56256|2450988|6169|12676|1815460|4197|8581|21248|1833962|39|14023|4|19|10|2|174|9|16|96.14|256.69|10.26|3942.88|164.16|1538.24|4107.04|0.00|0.00|2053.44|164.16|164.16|2217.60|2217.60|-1374.08| +2450900|56256|2450943|11930|12676|1815460|4197|8581|21248|1833962|39|14023|14|20|17|5|36|9|29|76.54|210.48|134.70|2197.62|3906.30|2219.66|6103.92|156.25|0.00|365.98|3906.30|4062.55|4272.28|4428.53|1686.64| +2450900|56256|2450988|9517|12676|1815460|4197|8581|21248|1833962|39|14023|7|4|2|4|293|9|75|77.80|126.03|113.42|945.75|8506.50|5835.00|9452.25|58.69|2637.01|2646.00|5869.49|5928.18|8515.49|8574.18|34.49| +2450900|56256|2451000|9637|12676|1815460|4197|8581|21248|1833962|39|14023|10|13|11|2|153|9|92|47.71|59.16|33.12|2395.68|3047.04|4389.32|5442.72|60.94|0.00|2012.96|3047.04|3107.98|5060.00|5120.94|-1342.28| +2450900|56256|2450994|13790|12676|1815460|4197|8581|21248|1833962|39|14023|28|16|6|5|217|9|30|84.41|243.10|126.41|3500.70|3792.30|2532.30|7293.00|303.38|0.00|802.20|3792.30|4095.68|4594.50|4897.88|1260.00| +2450900|56256|2450983|14032|12676|1815460|4197|8581|21248|1833962|39|14023|46|7|4|4|131|9|94|93.91|145.56|1.45|13546.34|136.30|8827.54|13682.64|8.17|0.00|2325.56|136.30|144.47|2461.86|2470.03|-8691.24| +2450900|56256|2450902|5780|12676|1815460|4197|8581|21248|1833962|39|14023|32|28|13|3|256|9|79|74.37|135.35|102.86|2566.71|8125.94|5875.23|10692.65|162.51|0.00|1496.26|8125.94|8288.45|9622.20|9784.71|2250.71| +2450900|56256|2451020|8896|12676|1815460|4197|8581|21248|1833962|39|14023|26|14|9|3|267|9|6|10.40|24.85|12.92|71.58|77.52|62.40|149.10|1.55|0.00|41.70|77.52|79.07|119.22|120.77|15.12| +2450900|56256|2450989|5962|12676|1815460|4197|8581|21248|1833962|39|14023|50|7|12|5|178|9|19|34.34|72.11|26.68|863.17|506.92|652.46|1370.09|10.13|0.00|287.66|506.92|517.05|794.58|804.71|-145.54| +2450900|56256|2450971|12166|12676|1815460|4197|8581|21248|1833962|39|14023|31|19|1|5|186|9|10|51.43|153.77|59.97|938.00|599.70|514.30|1537.70|5.99|0.00|445.90|599.70|605.69|1045.60|1051.59|85.40| +2450900|56256|2450922|10303|12676|1815460|4197|8581|21248|1833962|39|14023|4|16|18|2|73|9|50|43.85|83.31|4.99|3916.00|249.50|2192.50|4165.50|7.48|0.00|999.50|249.50|256.98|1249.00|1256.48|-1943.00| 
+2450900|56256|2450994|4864|12676|1815460|4197|8581|21248|1833962|39|14023|2|10|13|2|184|9|32|6.05|7.19|3.37|122.24|107.84|193.60|230.08|8.62|0.00|78.08|107.84|116.46|185.92|194.54|-85.76| +2450900|56256|2450975|15307|12676|1815460|4197|8581|21248|1833962|39|14023|20|28|9|3|80|9|19|99.99|266.97|248.28|355.11|4717.32|1899.81|5072.43|94.34|0.00|1115.87|4717.32|4811.66|5833.19|5927.53|2817.51| +2450900|56256|2450906|9236|12676|1815460|4197|8581|21248|1833962|39|14023|46|20|18|2|69|9|63|21.25|23.16|21.53|102.69|1356.39|1338.75|1459.08|40.69|0.00|116.55|1356.39|1397.08|1472.94|1513.63|17.64| +2450900|56256|2450980|16741|12676|1815460|4197|8581|21248|1833962|39|14023|40|7|17|2|140|9|5|2.57|5.37|2.04|16.65|10.20|12.85|26.85|0.30|0.00|7.20|10.20|10.50|17.40|17.70|-2.65| +2451821|29187|2451843|6797|1204|1306794|31|21268|98174|67833|6132|42569|56|2|5|3|11|10|39|15.52|43.61|30.52|510.51|1190.28|605.28|1700.79|0.00|1047.44|135.72|142.84|142.84|278.56|278.56|-462.44| +2451821|29187|2451839|13951|1204|1306794|31|21268|98174|67833|6132|42569|55|2|6|5|14|10|11|28.39|52.52|47.79|52.03|525.69|312.29|577.72|15.77|0.00|254.10|525.69|541.46|779.79|795.56|213.40| +2451821|29187|2451902|6146|1204|1306794|31|21268|98174|67833|6132|42569|1|26|17|5|143|10|11|18.69|34.95|19.92|165.33|219.12|205.59|384.45|6.57|0.00|3.74|219.12|225.69|222.86|229.43|13.53| +2451821|29187|2451823|4937|1204|1306794|31|21268|98174|67833|6132|42569|8|17|3|5|44|10|78|70.93|90.08|36.93|4145.70|2880.54|5532.54|7026.24|57.61|0.00|280.80|2880.54|2938.15|3161.34|3218.95|-2652.00| +2451821|29187|2451932|15797|1204|1306794|31|21268|98174|67833|6132|42569|23|1|3|5|14|10|17|18.29|28.71|19.23|161.16|326.91|310.93|488.07|16.34|0.00|4.76|326.91|343.25|331.67|348.01|15.98| +2451821|29187|2451926|12637|1204|1306794|31|21268|98174|67833|6132|42569|53|29|10|3|53|10|62|47.55|87.01|52.20|2158.22|3236.40|2948.10|5394.62|258.91|0.00|2697.00|3236.40|3495.31|5933.40|6192.31|288.30| +2451821|29187|2451904|6605|1204|1306794|31|21268|98174|67833|6132|42569|23|11|7|3|2|10|50|22.47|29.88|21.51|418.50|1075.50|1123.50|1494.00|10.75|0.00|702.00|1075.50|1086.25|1777.50|1788.25|-48.00| +2451821|29187|2451878|10915|1204|1306794|31|21268|98174|67833|6132|42569|20|8|19|3|197|10|77|32.04|54.78|23.00|2447.06|1771.00|2467.08|4218.06|88.55|0.00|1771.00|1771.00|1859.55|3542.00|3630.55|-696.08| +2451821|29187|2451828|2450|1204|1306794|31|21268|98174|67833|6132|42569|14|5|8|5|25|10|5|32.51|88.10|0.00|440.50|0.00|162.55|440.50|0.00|0.00|92.50|0.00|0.00|92.50|92.50|-162.55| +2451879|60291|2451988|11383|77402|480194|5386|17778|12527|1601689|4554|4946|38|11|2|3|61|11|90|7.65|11.16|5.91|472.50|531.90|688.50|1004.40|10.63|0.00|0.00|531.90|542.53|531.90|542.53|-156.60| +2451879|60291|2451965|5690|77402|480194|5386|17778|12527|1601689|4554|4946|55|11|11|4|104|11|80|17.07|40.28|24.57|1256.80|1965.60|1365.60|3222.40|137.59|0.00|1385.60|1965.60|2103.19|3351.20|3488.79|600.00| +2451879|60291|2451917|1729|77402|480194|5386|17778|12527|1601689|4554|4946|47|11|8|5|162|11|84|74.02|184.30|49.76|11301.36|4179.84|6217.68|15481.20|83.59|0.00|1392.72|4179.84|4263.43|5572.56|5656.15|-2037.84| +2451879|60291|2451963|6401|77402|480194|5386|17778|12527|1601689|4554|4946|35|19|2|4|143|11|35|84.47|192.59|136.73|1955.10|4785.55|2956.45|6740.65|28.71|4067.71|1347.85|717.84|746.55|2065.69|2094.40|-2238.61| +2451879|60291|2451990|13879|77402|480194|5386|17778|12527|1601689|4554|4946|25|23|11|3|32|11|81|35.78|81.22|30.05|4144.77|2434.05|2898.18|6578.82|0.00|973.62|2630.88|1460.43|1460.43|4091.31|4091.31|-1437.75| 
+2451879|60291|2451881|16559|77402|480194|5386|17778|12527|1601689|4554|4946|13|14|12|3|57|11|61|23.14|68.26|25.25|2623.61|1540.25|1411.54|4163.86|34.50|677.71|124.44|862.54|897.04|986.98|1021.48|-549.00| +2451879|60291|2451978|11587|77402|480194|5386|17778|12527|1601689|4554|4946|2|20|20|4|278|11|63|37.68|55.76|10.59|2845.71|667.17|2373.84|3512.88|33.35|0.00|983.43|667.17|700.52|1650.60|1683.95|-1706.67| +2451879|60291|2451984|2701|77402|480194|5386|17778|12527|1601689|4554|4946|43|7|16|3|100|11|9|55.58|58.91|49.48|84.87|445.32|500.22|530.19|35.62|0.00|100.71|445.32|480.94|546.03|581.65|-54.90| +2451879|60291|2451928|7520|77402|480194|5386|17778|12527|1601689|4554|4946|26|19|3|1|42|11|91|65.31|146.94|58.77|8023.47|5348.07|5943.21|13371.54|106.96|0.00|3476.20|5348.07|5455.03|8824.27|8931.23|-595.14| +2451879|60291|2451909|8285|77402|480194|5386|17778|12527|1601689|4554|4946|5|1|19|2|235|11|6|50.74|121.77|118.11|21.96|708.66|304.44|730.62|21.25|0.00|21.90|708.66|729.91|730.56|751.81|404.22| +2452251|49599|2452366|13601|96115|1463304|93|17729|4676|601676|3343|35332|17|23|5|3|21|12|88|9.73|21.60|15.98|494.56|1406.24|856.24|1900.80|6.18|1251.55|893.20|154.69|160.87|1047.89|1054.07|-701.55| +2452251|49599|2452351|10743|96115|1463304|93|17729|4676|601676|3343|35332|23|5|4|3|299|12|41|40.16|107.22|42.88|2637.94|1758.08|1646.56|4396.02|158.22|0.00|219.76|1758.08|1916.30|1977.84|2136.06|111.52| +2452251|49599|2452338|97|96115|1463304|93|17729|4676|601676|3343|35332|21|11|4|2|60|12|39|33.74|70.17|20.34|1943.37|793.26|1315.86|2736.63|8.32|674.27|492.57|118.99|127.31|611.56|619.88|-1196.87| +2452251|49599|2452325|1747|96115|1463304|93|17729|4676|601676|3343|35332|55|7|20|3|59|12|80|11.10|11.10|10.10|80.00|808.00|888.00|888.00|8.08|0.00|408.00|808.00|816.08|1216.00|1224.08|-80.00| +2452251|49599|2452359|11851|96115|1463304|93|17729|4676|601676|3343|35332|55|21|4|4|117|12|8|39.57|96.15|17.30|630.80|138.40|316.56|769.20|4.15|0.00|253.76|138.40|142.55|392.16|396.31|-178.16| +2452251|49599|2452369|2425|96115|1463304|93|17729|4676|601676|3343|35332|45|9|20|2|27|12|65|31.71|58.98|13.56|2952.30|881.40|2061.15|3833.70|61.69|0.00|1379.95|881.40|943.09|2261.35|2323.04|-1179.75| +2452251|49599|2452330|10011|96115|1463304|93|17729|4676|601676|3343|35332|19|7|8|4|23|12|26|41.97|101.14|42.47|1525.42|1104.22|1091.22|2629.64|77.29|0.00|157.56|1104.22|1181.51|1261.78|1339.07|13.00| +2452251|49599|2452317|17899|96115|1463304|93|17729|4676|601676|3343|35332|55|11|14|5|83|12|60|36.20|36.92|5.90|1861.20|354.00|2172.00|2215.20|14.16|0.00|243.60|354.00|368.16|597.60|611.76|-1818.00| +2452251|49599|2452338|8655|96115|1463304|93|17729|4676|601676|3343|35332|5|9|17|2|274|12|60|22.51|55.82|22.88|1976.40|1372.80|1350.60|3349.20|27.45|0.00|602.40|1372.80|1400.25|1975.20|2002.65|22.20| +2451351|75714|2451415|5839|62682|719464|6293|46322|46110|1632520|5162|2391|49|1|4|5|86|13|14|61.61|90.56|72.44|253.68|1014.16|862.54|1267.84|40.56|0.00|215.46|1014.16|1054.72|1229.62|1270.18|151.62| +2451351|75714|2451450|15520|62682|719464|6293|46322|46110|1632520|5162|2391|7|13|20|1|39|13|60|99.12|195.26|85.91|6561.00|5154.60|5947.20|11715.60|8.76|4278.31|5506.20|876.29|885.05|6382.49|6391.25|-5070.91| +2451351|75714|2451401|13735|62682|719464|6293|46322|46110|1632520|5162|2391|16|13|5|3|264|13|50|60.92|69.44|26.38|2153.00|1319.00|3046.00|3472.00|118.71|0.00|416.50|1319.00|1437.71|1735.50|1854.21|-1727.00| 
+2451351|75714|2451432|7954|62682|719464|6293|46322|46110|1632520|5162|2391|58|22|17|4|281|13|33|24.77|39.63|13.47|863.28|444.51|817.41|1307.79|31.11|0.00|326.70|444.51|475.62|771.21|802.32|-372.90| +2451351|75714|2451470|15424|62682|719464|6293|46322|46110|1632520|5162|2391|52|22|14|4|154|13|11|17.78|26.13|25.34|8.69|278.74|195.58|287.43|19.51|0.00|83.27|278.74|298.25|362.01|381.52|83.16| +2451351|75714|2451411|3235|62682|719464|6293|46322|46110|1632520|5162|2391|20|25|12|4|206|13|45|26.47|33.61|11.76|983.25|529.20|1191.15|1512.45|0.37|523.90|604.80|5.30|5.67|610.10|610.47|-1185.85| +2451351|75714|2451442|16711|62682|719464|6293|46322|46110|1632520|5162|2391|32|25|6|4|22|13|78|48.82|83.97|71.37|982.80|5566.86|3807.96|6549.66|55.66|0.00|1113.06|5566.86|5622.52|6679.92|6735.58|1758.90| +2451351|75714|2451377|13172|62682|719464|6293|46322|46110|1632520|5162|2391|13|4|19|2|208|13|23|85.64|108.76|26.10|1901.18|600.30|1969.72|2501.48|36.01|0.00|0.00|600.30|636.31|600.30|636.31|-1369.42| +2451351|75714|2451354|10610|62682|719464|6293|46322|46110|1632520|5162|2391|26|26|7|5|90|13|22|73.91|108.64|56.49|1147.30|1242.78|1626.02|2390.08|14.91|1056.36|740.74|186.42|201.33|927.16|942.07|-1439.60| +2452224|44494|2452292|3255|85452|861000|2948|39343|23682|678237|1587|16335|3|11|10|4|175|14|77|80.72|151.75|100.15|3973.20|7711.55|6215.44|11684.75|154.23|0.00|4907.21|7711.55|7865.78|12618.76|12772.99|1496.11| +2452224|44494|2452253|5667|85452|861000|2948|39343|23682|678237|1587|16335|23|9|18|1|247|14|20|66.38|106.87|101.52|107.00|2030.40|1327.60|2137.40|182.73|0.00|384.60|2030.40|2213.13|2415.00|2597.73|702.80| +2452224|44494|2452337|9797|85452|861000|2948|39343|23682|678237|1587|16335|49|7|6|5|204|14|62|10.82|24.88|2.73|1373.30|169.26|670.84|1542.56|1.69|0.00|107.88|169.26|170.95|277.14|278.83|-501.58| +2452224|44494|2452333|9305|85452|861000|2948|39343|23682|678237|1587|16335|11|25|20|2|184|14|36|78.32|127.66|14.04|4090.32|505.44|2819.52|4595.76|20.21|0.00|1194.84|505.44|525.65|1700.28|1720.49|-2314.08| +2452224|44494|2452272|12933|85452|861000|2948|39343|23682|678237|1587|16335|15|13|9|3|57|14|82|60.20|89.09|68.59|1681.00|5624.38|4936.40|7305.38|281.21|0.00|2702.72|5624.38|5905.59|8327.10|8608.31|687.98| +2452224|44494|2452272|9991|85452|861000|2948|39343|23682|678237|1587|16335|21|9|9|3|47|14|54|18.34|36.12|4.69|1697.22|253.26|990.36|1950.48|10.13|0.00|838.62|253.26|263.39|1091.88|1102.01|-737.10| +2452224|44494|2452322|13531|85452|861000|2948|39343|23682|678237|1587|16335|33|21|18|2|106|14|47|48.25|127.86|52.42|3545.68|2463.74|2267.75|6009.42|73.91|0.00|480.34|2463.74|2537.65|2944.08|3017.99|195.99| +2452224|44494|2452275|17957|85452|861000|2948|39343|23682|678237|1587|16335|35|15|18|3|98|14|21|77.06|97.86|17.61|1685.25|369.81|1618.26|2055.06|7.39|0.00|267.12|369.81|377.20|636.93|644.32|-1248.45| +2452224|44494|2452341|2195|85452|861000|2948|39343|23682|678237|1587|16335|25|17|12|5|138|14|33|69.15|132.07|101.69|1002.54|3355.77|2281.95|4358.31|268.46|0.00|2048.31|3355.77|3624.23|5404.08|5672.54|1073.82| +2452224|44494|2452334|12055|85452|861000|2948|39343|23682|678237|1587|16335|17|13|18|1|24|14|17|39.48|93.56|43.97|843.03|747.49|671.16|1590.52|52.32|0.00|699.72|747.49|799.81|1447.21|1499.53|76.33| +2452224|44494|2452310|14737|85452|861000|2948|39343|23682|678237|1587|16335|23|11|13|2|101|14|89|76.89|180.69|83.11|8684.62|7396.79|6843.21|16081.41|147.93|0.00|3055.37|7396.79|7544.72|10452.16|10600.09|553.58| 
+2452224|44494|2452252|8823|85452|861000|2948|39343|23682|678237|1587|16335|9|9|15|5|245|14|21|85.86|179.44|111.25|1431.99|2336.25|1803.06|3768.24|70.08|0.00|866.67|2336.25|2406.33|3202.92|3273.00|533.19| +2452224|44494|2452271|13363|85452|861000|2948|39343|23682|678237|1587|16335|25|19|14|3|82|14|63|37.23|80.41|53.87|1672.02|3393.81|2345.49|5065.83|57.01|543.00|1063.44|2850.81|2907.82|3914.25|3971.26|505.32| +2452224|44494|2452326|10351|85452|861000|2948|39343|23682|678237|1587|16335|3|13|2|2|262|14|55|76.31|110.64|61.95|2677.95|3407.25|4197.05|6085.20|68.14|0.00|2920.50|3407.25|3475.39|6327.75|6395.89|-789.80| +2452224|44494|2452334|9733|85452|861000|2948|39343|23682|678237|1587|16335|59|13|14|1|208|14|51|62.40|133.53|26.70|5448.33|1361.70|3182.40|6810.03|81.70|0.00|3404.76|1361.70|1443.40|4766.46|4848.16|-1820.70| +2452224|44494|2452280|2585|85452|861000|2948|39343|23682|678237|1587|16335|21|17|14|2|287|14|95|23.49|46.74|1.86|4263.60|176.70|2231.55|4440.30|7.06|0.00|2220.15|176.70|183.76|2396.85|2403.91|-2054.85| +2451405|67593|2451414|8326|363|3898|586|36874|85171|612056|5032|41320|28|14|11|4|257|15|45|60.01|106.21|105.14|48.15|4731.30|2700.45|4779.45|141.93|0.00|1433.70|4731.30|4873.23|6165.00|6306.93|2030.85| +2451405|67593|2451508|11812|363|3898|586|36874|85171|612056|5032|41320|49|14|4|2|3|15|71|16.54|20.67|6.20|1027.37|440.20|1174.34|1467.57|35.21|0.00|308.14|440.20|475.41|748.34|783.55|-734.14| +2451405|67593|2451459|13489|363|3898|586|36874|85171|612056|5032|41320|4|2|18|4|283|15|21|83.67|190.76|87.74|2163.42|1842.54|1757.07|4005.96|110.55|0.00|1361.85|1842.54|1953.09|3204.39|3314.94|85.47| +2451405|67593|2451517|11924|363|3898|586|36874|85171|612056|5032|41320|56|2|15|3|221|15|56|49.27|104.45|47.00|3217.20|2632.00|2759.12|5849.20|131.60|0.00|2632.00|2632.00|2763.60|5264.00|5395.60|-127.12| +2451405|67593|2451455|12512|363|3898|586|36874|85171|612056|5032|41320|25|19|7|4|91|15|62|32.09|74.76|12.70|3847.72|787.40|1989.58|4635.12|15.74|0.00|2131.56|787.40|803.14|2918.96|2934.70|-1202.18| +2451405|67593|2451490|6355|363|3898|586|36874|85171|612056|5032|41320|31|19|14|5|149|15|57|73.99|181.27|74.32|6096.15|4236.24|4217.43|10332.39|254.17|0.00|3202.83|4236.24|4490.41|7439.07|7693.24|18.81| +2451405|67593|2451449|2542|363|3898|586|36874|85171|612056|5032|41320|1|26|7|5|53|15|100|57.64|69.74|39.75|2999.00|3975.00|5764.00|6974.00|318.00|0.00|139.00|3975.00|4293.00|4114.00|4432.00|-1789.00| +2451405|67593|2451518|4933|363|3898|586|36874|85171|612056|5032|41320|1|26|17|2|120|15|56|16.59|21.73|13.68|450.80|766.08|929.04|1216.88|61.28|0.00|72.80|766.08|827.36|838.88|900.16|-162.96| +2451405|67593|2451454|7576|363|3898|586|36874|85171|612056|5032|41320|7|10|11|3|298|15|28|7.92|14.41|13.11|36.40|367.08|221.76|403.48|3.67|0.00|68.32|367.08|370.75|435.40|439.07|145.32| +2451405|67593|2451506|14248|363|3898|586|36874|85171|612056|5032|41320|44|22|1|3|122|15|94|4.94|12.25|3.06|863.86|287.64|464.36|1151.50|0.00|0.00|79.90|287.64|287.64|367.54|367.54|-176.72| +2451405|67593|2451426|3151|363|3898|586|36874|85171|612056|5032|41320|26|26|14|4|224|15|74|24.48|66.58|64.58|148.00|4778.92|1811.52|4926.92|95.57|0.00|2315.46|4778.92|4874.49|7094.38|7189.95|2967.40| +2451405|67593|2451475|6506|363|3898|586|36874|85171|612056|5032|41320|2|13|15|5|98|15|5|46.13|63.19|1.89|306.50|9.45|230.65|315.95|0.00|9.16|6.30|0.29|0.29|6.59|6.59|-230.36| 
+2452342|70397|2452347|15786|21567|477008|4096|3937|43522|468964|2307|36195|49|6|1|4|147|16|59|87.34|133.63|44.09|5282.86|2601.31|5153.06|7884.17|208.10|0.00|1813.07|2601.31|2809.41|4414.38|4622.48|-2551.75| +2452342|70397|2452432|16849|21567|477008|4096|3937|43522|468964|2307|36195|15|1|17|5|86|16|37|14.92|22.23|11.11|411.44|411.07|552.04|822.51|10.19|156.20|279.35|254.87|265.06|534.22|544.41|-297.17| +2452342|70397|2452402|5107|21567|477008|4096|3937|43522|468964|2307|36195|42|24|13|4|52|16|54|24.87|64.91|36.99|1507.68|1997.46|1342.98|3505.14|139.82|0.00|1086.48|1997.46|2137.28|3083.94|3223.76|654.48| +2452342|70397|2452406|1146|21567|477008|4096|3937|43522|468964|2307|36195|6|27|8|4|160|16|59|41.64|65.79|55.92|582.33|3299.28|2456.76|3881.61|32.99|0.00|232.46|3299.28|3332.27|3531.74|3564.73|842.52| +2452342|70397|2452379|8895|21567|477008|4096|3937|43522|468964|2307|36195|37|7|6|3|123|16|2|94.72|115.55|80.88|69.34|161.76|189.44|231.10|9.70|0.00|50.84|161.76|171.46|212.60|222.30|-27.68| +2452342|70397|2452425|10447|21567|477008|4096|3937|43522|468964|2307|36195|49|24|16|3|137|16|29|80.40|149.54|44.86|3035.72|1300.94|2331.60|4336.66|91.06|0.00|737.18|1300.94|1392.00|2038.12|2129.18|-1030.66| +2452342|70397|2452378|13224|21567|477008|4096|3937|43522|468964|2307|36195|39|27|18|2|146|16|57|59.56|109.59|31.78|4435.17|1811.46|3394.92|6246.63|108.68|0.00|1124.04|1811.46|1920.14|2935.50|3044.18|-1583.46| +2452342|70397|2452423|5721|21567|477008|4096|3937|43522|468964|2307|36195|36|9|12|4|146|16|22|50.53|55.58|3.33|1149.50|73.26|1111.66|1222.76|0.02|72.52|232.32|0.74|0.76|233.06|233.08|-1110.92| +2452386|7275|2452453|14461|93708|166860|5612|38411|33483|816413|3114|37113|51|30|7|1|245|17|14|28.04|62.52|38.76|332.64|542.64|392.56|875.28|23.17|211.62|437.64|331.02|354.19|768.66|791.83|-61.54| +2452386|7275|2452466|13263|93708|166860|5612|38411|33483|816413|3114|37113|33|7|11|3|115|17|64|53.31|100.22|51.11|3143.04|3271.04|3411.84|6414.08|196.26|0.00|1667.20|3271.04|3467.30|4938.24|5134.50|-140.80| +2452386|7275|2452501|4512|93708|166860|5612|38411|33483|816413|3114|37113|21|25|2|1|280|17|13|98.13|255.13|35.71|2852.46|464.23|1275.69|3316.69|9.28|0.00|1658.28|464.23|473.51|2122.51|2131.79|-811.46| +2452386|7275|2452449|5703|93708|166860|5612|38411|33483|816413|3114|37113|30|30|12|2|239|17|82|34.74|98.66|6.90|7524.32|565.80|2848.68|8090.12|50.92|0.00|3639.98|565.80|616.72|4205.78|4256.70|-2282.88| +2452386|7275|2452413|16303|93708|166860|5612|38411|33483|816413|3114|37113|21|9|9|3|197|17|100|59.08|76.21|10.66|6555.00|1066.00|5908.00|7621.00|63.96|0.00|3505.00|1066.00|1129.96|4571.00|4634.96|-4842.00| +2452386|7275|2452387|6657|93708|166860|5612|38411|33483|816413|3114|37113|21|12|8|3|88|17|20|78.32|151.15|75.57|1511.60|1511.40|1566.40|3023.00|30.22|0.00|665.00|1511.40|1541.62|2176.40|2206.62|-55.00| +2452386|7275|2452404|972|93708|166860|5612|38411|33483|816413|3114|37113|48|9|16|2|188|17|82|78.80|187.54|61.88|10304.12|5074.16|6461.60|15378.28|202.96|0.00|922.50|5074.16|5277.12|5996.66|6199.62|-1387.44| +2452386|7275|2452441|14377|93708|166860|5612|38411|33483|816413|3114|37113|7|19|6|5|21|17|90|79.43|114.37|18.29|8647.20|1646.10|7148.70|10293.30|98.76|0.00|0.00|1646.10|1744.86|1646.10|1744.86|-5502.60| +2452386|7275|2452438|14581|93708|166860|5612|38411|33483|816413|3114|37113|39|18|11|3|232|17|7|58.74|61.67|6.16|388.57|43.12|411.18|431.69|0.97|23.71|176.96|19.41|20.38|196.37|197.34|-391.77| 
+2451422|33648|2451514|2096|4238|1802467|6743|172|86677|1630659|4106|17182|43|22|19|4|169|18|97|45.06|130.67|53.57|7478.70|5196.29|4370.82|12674.99|103.92|0.00|6337.01|5196.29|5300.21|11533.30|11637.22|825.47| +2451422|33648|2451484|15748|4238|1802467|6743|172|86677|1630659|4106|17182|14|10|1|1|261|18|77|72.83|135.46|27.09|8344.49|2085.93|5607.91|10430.42|0.00|0.00|2294.60|2085.93|2085.93|4380.53|4380.53|-3521.98| +2451422|33648|2451468|667|4238|1802467|6743|172|86677|1630659|4106|17182|49|13|9|1|186|18|70|29.06|60.15|10.82|3453.10|757.40|2034.20|4210.50|8.48|651.36|631.40|106.04|114.52|737.44|745.92|-1928.16| +2451422|33648|2451446|3130|4238|1802467|6743|172|86677|1630659|4106|17182|26|13|20|3|166|18|83|2.78|6.28|4.64|136.12|385.12|230.74|521.24|30.80|0.00|9.96|385.12|415.92|395.08|425.88|154.38| +2451422|33648|2451510|3979|4238|1802467|6743|172|86677|1630659|4106|17182|46|19|6|2|268|18|100|94.73|161.04|27.37|13367.00|2737.00|9473.00|16104.00|82.11|0.00|322.00|2737.00|2819.11|3059.00|3141.11|-6736.00| +2451422|33648|2451488|14911|4238|1802467|6743|172|86677|1630659|4106|17182|40|19|16|5|51|18|12|87.76|200.09|148.06|624.36|1776.72|1053.12|2401.08|35.53|0.00|192.00|1776.72|1812.25|1968.72|2004.25|723.60| +2451422|33648|2451425|8533|4238|1802467|6743|172|86677|1630659|4106|17182|16|2|5|3|131|18|62|40.23|48.67|19.46|1811.02|1206.52|2494.26|3017.54|48.26|0.00|271.56|1206.52|1254.78|1478.08|1526.34|-1287.74| +2451422|33648|2451527|14008|4238|1802467|6743|172|86677|1630659|4106|17182|7|20|8|3|265|18|52|6.61|10.90|8.61|119.08|447.72|343.72|566.80|18.17|188.04|135.72|259.68|277.85|395.40|413.57|-84.04| +2451422|33648|2451429|16834|4238|1802467|6743|172|86677|1630659|4106|17182|32|20|12|3|254|18|78|50.44|89.27|1.78|6824.22|138.84|3934.32|6963.06|8.33|0.00|2018.64|138.84|147.17|2157.48|2165.81|-3795.48| +2452616|42506|2452627|6168|57659|1154202|7098|44794|66274|257323|2776|72|33|12|2|3|137|19|77|45.57|61.97|28.50|2577.19|2194.50|3508.89|4771.69|65.83|0.00|381.15|2194.50|2260.33|2575.65|2641.48|-1314.39| +2452616|42506|2452672|13206|57659|1154202|7098|44794|66274|257323|2776|72|39|6|6|4|42|19|7|89.03|121.08|107.76|93.24|754.32|623.21|847.56|15.08|0.00|364.42|754.32|769.40|1118.74|1133.82|131.11| +2452616|42506|2452629|1614|57659|1154202|7098|44794|66274|257323|2776|72|21|24|16|5|181|19|94|5.59|5.81|4.88|87.42|458.72|525.46|546.14|0.00|0.00|109.04|458.72|458.72|567.76|567.76|-66.74| +2452616|42506|2452679|5625|57659|1154202|7098|44794|66274|257323|2776|72|3|1|10|2|51|19|80|26.60|36.70|0.73|2877.60|58.40|2128.00|2936.00|4.67|0.00|997.60|58.40|63.07|1056.00|1060.67|-2069.60| +2452616|42506|2452710|4725|57659|1154202|7098|44794|66274|257323|2776|72|33|19|3|1|192|19|64|48.61|72.91|10.93|3966.72|699.52|3111.04|4666.24|0.00|125.91|652.80|573.61|573.61|1226.41|1226.41|-2537.43| +2452616|42506|2452631|3513|57659|1154202|7098|44794|66274|257323|2776|72|57|12|16|1|36|19|7|10.98|28.43|25.87|17.92|181.09|76.86|199.01|12.67|0.00|71.61|181.09|193.76|252.70|265.37|104.23| +2452616|42506|2452715|8749|57659|1154202|7098|44794|66274|257323|2776|72|25|30|17|4|21|19|5|12.83|31.17|7.79|116.90|38.95|64.15|155.85|0.77|0.00|71.65|38.95|39.72|110.60|111.37|-25.20| +2452616|42506|2452730|7059|57659|1154202|7098|44794|66274|257323|2776|72|45|13|1|5|229|19|13|24.50|69.09|68.39|9.10|889.07|318.50|898.17|8.89|0.00|8.97|889.07|897.96|898.04|906.93|570.57| 
+2452616|42506|2452692|14997|57659|1154202|7098|44794|66274|257323|2776|72|37|9|1|2|203|19|51|86.10|102.45|61.47|2089.98|3134.97|4391.10|5224.95|31.34|0.00|1149.03|3134.97|3166.31|4284.00|4315.34|-1256.13| +2452616|42506|2452728|9345|57659|1154202|7098|44794|66274|257323|2776|72|43|13|12|2|89|19|51|7.02|9.89|5.73|212.16|292.23|358.02|504.39|2.92|0.00|166.26|292.23|295.15|458.49|461.41|-65.79| +2452616|42506|2452721|6288|57659|1154202|7098|44794|66274|257323|2776|72|19|24|4|3|265|19|52|41.66|112.06|45.94|3438.24|2388.88|2166.32|5827.12|71.66|0.00|465.92|2388.88|2460.54|2854.80|2926.46|222.56| +2452616|42506|2452653|17221|57659|1154202|7098|44794|66274|257323|2776|72|13|25|9|5|121|19|100|2.60|4.13|0.41|372.00|41.00|260.00|413.00|2.05|0.00|152.00|41.00|43.05|193.00|195.05|-219.00| +2452616|42506|2452639|7623|57659|1154202|7098|44794|66274|257323|2776|72|55|1|1|4|84|19|24|24.55|49.10|12.76|872.16|306.24|589.20|1178.40|6.12|0.00|365.28|306.24|312.36|671.52|677.64|-282.96| +2452616|42506|2452665|9475|57659|1154202|7098|44794|66274|257323|2776|72|27|27|7|2|300|19|62|41.84|122.59|84.58|2356.62|5243.96|2594.08|7600.58|367.07|0.00|2736.06|5243.96|5611.03|7980.02|8347.09|2649.88| +2452616|42506|2452725|17175|57659|1154202|7098|44794|66274|257323|2776|72|37|7|9|4|169|19|43|26.54|70.06|25.92|1898.02|1114.56|1141.22|3012.58|11.14|0.00|1174.76|1114.56|1125.70|2289.32|2300.46|-26.66| +2451576|44795|2451594|10861|76619|942296|2020|13697|44747|484545|5528|1605|19|5|19|2|275|20|68|48.01|52.81|7.92|3052.52|538.56|3264.68|3591.08|48.47|0.00|1328.04|538.56|587.03|1866.60|1915.07|-2726.12| +2451576|44795|2451679|11627|76619|942296|2020|13697|44747|484545|5528|1605|43|25|14|5|129|20|55|7.39|18.03|5.22|704.55|287.10|406.45|991.65|17.22|0.00|277.20|287.10|304.32|564.30|581.52|-119.35| +2451576|44795|2451598|9169|76619|942296|2020|13697|44747|484545|5528|1605|11|23|8|4|7|20|15|73.98|221.94|137.60|1265.10|2064.00|1109.70|3329.10|61.92|0.00|366.15|2064.00|2125.92|2430.15|2492.07|954.30| +2451576|44795|2451690|16862|76619|942296|2020|13697|44747|484545|5528|1605|49|2|19|1|90|20|81|53.11|152.42|74.68|6296.94|6049.08|4301.91|12346.02|60.49|0.00|6049.08|6049.08|6109.57|12098.16|12158.65|1747.17| +2451576|44795|2451607|17935|76619|942296|2020|13697|44747|484545|5528|1605|8|7|13|2|98|20|1|59.35|135.31|13.53|121.78|13.53|59.35|135.31|1.21|0.00|60.88|13.53|14.74|74.41|75.62|-45.82| +2451576|44795|2451633|12731|76619|942296|2020|13697|44747|484545|5528|1605|7|13|20|3|46|20|69|81.98|237.74|16.64|15255.90|1148.16|5656.62|16404.06|103.33|0.00|5413.05|1148.16|1251.49|6561.21|6664.54|-4508.46| +2451576|44795|2451644|12902|76619|942296|2020|13697|44747|484545|5528|1605|35|7|20|1|52|20|97|5.42|13.49|0.00|1308.53|0.00|525.74|1308.53|0.00|0.00|549.02|0.00|0.00|549.02|549.02|-525.74| +2451576|44795|2451629|4214|76619|942296|2020|13697|44747|484545|5528|1605|38|7|8|2|286|20|93|30.19|47.70|19.08|2661.66|1774.44|2807.67|4436.10|29.27|798.49|2173.41|975.95|1005.22|3149.36|3178.63|-1831.72| +2451576|44795|2451682|182|76619|942296|2020|13697|44747|484545|5528|1605|29|14|14|2|220|20|98|73.29|199.34|63.78|13284.88|6250.44|7182.42|19535.32|125.00|0.00|5078.36|6250.44|6375.44|11328.80|11453.80|-931.98| +2451576|44795|2451601|13207|76619|942296|2020|13697|44747|484545|5528|1605|55|8|2|4|206|20|26|61.93|114.57|25.20|2323.62|655.20|1610.18|2978.82|39.31|0.00|804.18|655.20|694.51|1459.38|1498.69|-954.98| 
+2451576|44795|2451641|17396|76619|942296|2020|13697|44747|484545|5528|1605|49|17|13|2|111|20|36|38.67|100.15|76.11|865.44|2739.96|1392.12|3605.40|219.19|0.00|1153.44|2739.96|2959.15|3893.40|4112.59|1347.84| +2451576|44795|2451586|11947|76619|942296|2020|13697|44747|484545|5528|1605|49|26|20|4|25|20|11|9.24|13.95|0.13|152.02|1.43|101.64|153.45|0.04|0.00|16.83|1.43|1.47|18.26|18.30|-100.21| +2452580|26067|2452637|8988|94484|771125|5366|44407|94484|771125|5366|44407|9|18|20|4|208|21|96|95.27|191.49|44.04|14155.20|4227.84|9145.92|18383.04|253.67|0.00|1469.76|4227.84|4481.51|5697.60|5951.27|-4918.08| +2452580|26067|2452626|4747|94484|771125|5366|44407|94484|771125|5366|44407|55|21|14|5|13|21|54|40.63|63.38|62.11|68.58|3353.94|2194.02|3422.52|167.69|0.00|684.18|3353.94|3521.63|4038.12|4205.81|1159.92| +2452580|26067|2452696|5886|94484|771125|5366|44407|94484|771125|5366|44407|27|27|7|2|30|21|25|51.53|53.07|19.10|849.25|477.50|1288.25|1326.75|15.66|85.95|344.75|391.55|407.21|736.30|751.96|-896.70| +2452580|26067|2452613|12723|94484|771125|5366|44407|94484|771125|5366|44407|24|9|5|5|28|21|33|29.50|71.98|1.43|2328.15|47.19|973.50|2375.34|0.00|0.00|23.43|47.19|47.19|70.62|70.62|-926.31| +2452580|26067|2452625|297|94484|771125|5366|44407|94484|771125|5366|44407|48|12|1|3|125|21|84|4.13|4.25|3.23|85.68|271.32|346.92|357.00|5.42|0.00|85.68|271.32|276.74|357.00|362.42|-75.60| +2452580|26067|2452665|2941|94484|771125|5366|44407|94484|771125|5366|44407|25|24|8|2|293|21|39|43.71|47.64|33.82|538.98|1318.98|1704.69|1857.96|10.28|290.17|408.72|1028.81|1039.09|1437.53|1447.81|-675.88| +2452580|26067|2452661|16111|94484|771125|5366|44407|94484|771125|5366|44407|33|15|16|4|182|21|21|14.11|36.54|16.80|414.54|352.80|296.31|767.34|28.22|0.00|329.91|352.80|381.02|682.71|710.93|56.49| +2452580|26067|2452687|15121|94484|771125|5366|44407|94484|771125|5366|44407|13|6|19|5|51|21|77|92.60|99.08|13.87|6561.17|1067.99|7130.20|7629.16|67.49|224.27|2440.90|843.72|911.21|3284.62|3352.11|-6286.48| +2452580|26067|2452586|16717|94484|771125|5366|44407|94484|771125|5366|44407|42|25|12|4|273|21|41|20.34|39.05|28.50|432.55|1168.50|833.94|1601.05|105.16|0.00|0.00|1168.50|1273.66|1168.50|1273.66|334.56| +2451176|80914|2451251|6176|11428|674591|5401|8164|93398|771622|4900|42910|22|28|1|2|153|22|94|11.47|30.62|2.75|2619.78|258.50|1078.18|2878.28|10.34|0.00|517.94|258.50|268.84|776.44|786.78|-819.68| +2451176|80914|2451181|16384|11428|674591|5401|8164|93398|771622|4900|42910|40|4|15|2|249|22|73|70.12|100.27|56.15|3220.76|4098.95|5118.76|7319.71|204.94|0.00|1170.92|4098.95|4303.89|5269.87|5474.81|-1019.81| +2451176|80914|2451197|2276|11428|674591|5401|8164|93398|771622|4900|42910|22|28|18|1|264|22|60|28.39|38.61|38.22|23.40|2293.20|1703.40|2316.60|45.86|0.00|648.60|2293.20|2339.06|2941.80|2987.66|589.80| +2451176|80914|2451241|9493|11428|674591|5401|8164|93398|771622|4900|42910|58|20|19|5|208|22|48|25.97|61.28|60.66|29.76|2911.68|1246.56|2941.44|145.58|0.00|911.52|2911.68|3057.26|3823.20|3968.78|1665.12| +2451176|80914|2451252|2288|11428|674591|5401|8164|93398|771622|4900|42910|52|10|8|5|79|22|23|17.12|49.30|31.05|419.75|714.15|393.76|1133.90|0.00|0.00|238.05|714.15|714.15|952.20|952.20|320.39| +2451176|80914|2451245|338|11428|674591|5401|8164|93398|771622|4900|42910|20|26|10|3|189|22|77|46.14|114.42|62.93|3964.73|4845.61|3552.78|8810.34|145.36|0.00|880.88|4845.61|4990.97|5726.49|5871.85|1292.83| 
+2451176|80914|2451188|10759|11428|674591|5401|8164|93398|771622|4900|42910|22|16|16|3|120|22|61|54.61|82.46|8.24|4527.42|502.64|3331.21|5030.06|13.47|165.87|1156.56|336.77|350.24|1493.33|1506.80|-2994.44| +2451176|80914|2451243|3160|11428|674591|5401|8164|93398|771622|4900|42910|46|1|18|5|232|22|56|31.95|75.08|24.02|2859.36|1345.12|1789.20|4204.48|80.70|0.00|1513.12|1345.12|1425.82|2858.24|2938.94|-444.08| +2451176|80914|2451214|7922|11428|674591|5401|8164|93398|771622|4900|42910|2|16|16|5|67|22|17|12.11|19.98|7.19|217.43|122.23|205.87|339.66|9.77|0.00|108.63|122.23|132.00|230.86|240.63|-83.64| +2451176|80914|2451252|7130|11428|674591|5401|8164|93398|771622|4900|42910|34|28|5|2|292|22|95|93.80|153.83|0.00|14613.85|0.00|8911.00|14613.85|0.00|0.00|4529.60|0.00|0.00|4529.60|4529.60|-8911.00| +2451176|80914|2451265|8936|11428|674591|5401|8164|93398|771622|4900|42910|38|10|5|4|10|22|69|60.00|169.20|59.22|7588.62|4086.18|4140.00|11674.80|94.79|1716.19|2451.57|2369.99|2464.78|4821.56|4916.35|-1770.01| +2451176|80914|2451205|4015|11428|674591|5401|8164|93398|771622|4900|42910|52|10|12|1|160|22|41|16.08|47.75|14.32|1370.63|587.12|659.28|1957.75|46.96|0.00|489.13|587.12|634.08|1076.25|1123.21|-72.16| +2451176|80914|2451184|5860|11428|674591|5401|8164|93398|771622|4900|42910|44|16|15|3|34|22|96|75.08|191.45|72.75|11395.20|6984.00|7207.68|18379.20|139.68|0.00|8453.76|6984.00|7123.68|15437.76|15577.44|-223.68| +2451103|82709|2451163|2984|49487|1494913|1492|10871|87709|790511|3455|14834|32|28|3|1|279|23|28|93.59|181.56|137.98|1220.24|3863.44|2620.52|5083.68|0.00|0.00|1575.84|3863.44|3863.44|5439.28|5439.28|1242.92| +2451103|82709|2451211|3211|49487|1494913|1492|10871|87709|790511|3455|14834|52|14|16|4|85|23|17|76.74|177.26|141.80|602.82|2410.60|1304.58|3013.42|216.95|0.00|903.89|2410.60|2627.55|3314.49|3531.44|1106.02| +2451103|82709|2451194|13238|49487|1494913|1492|10871|87709|790511|3455|14834|40|14|15|2|269|23|65|72.31|84.60|21.99|4069.65|1429.35|4700.15|5499.00|85.76|0.00|2529.15|1429.35|1515.11|3958.50|4044.26|-3270.80| +2451103|82709|2451122|17275|49487|1494913|1492|10871|87709|790511|3455|14834|19|19|20|5|227|23|98|28.06|46.57|35.39|1095.64|3468.22|2749.88|4563.86|0.00|0.00|1003.52|3468.22|3468.22|4471.74|4471.74|718.34| +2451103|82709|2451150|9319|49487|1494913|1492|10871|87709|790511|3455|14834|40|14|5|2|299|23|44|26.04|27.34|9.29|794.20|408.76|1145.76|1202.96|36.78|0.00|384.56|408.76|445.54|793.32|830.10|-737.00| +2451103|82709|2451181|15206|49487|1494913|1492|10871|87709|790511|3455|14834|13|20|5|2|136|23|43|97.87|275.01|239.25|1537.68|10287.75|4208.41|11825.43|102.87|0.00|1773.75|10287.75|10390.62|12061.50|12164.37|6079.34| +2451103|82709|2451210|17566|49487|1494913|1492|10871|87709|790511|3455|14834|52|7|11|3|55|23|66|39.45|108.09|19.45|5850.24|1283.70|2603.70|7133.94|51.34|0.00|998.58|1283.70|1335.04|2282.28|2333.62|-1320.00| +2451103|82709|2451204|10165|49487|1494913|1492|10871|87709|790511|3455|14834|56|25|4|3|212|23|67|73.95|91.69|57.76|2273.31|3869.92|4954.65|6143.23|154.79|0.00|2579.50|3869.92|4024.71|6449.42|6604.21|-1084.73| +2451103|82709|2451209|10820|49487|1494913|1492|10871|87709|790511|3455|14834|14|10|11|4|224|23|89|30.66|80.94|1.61|7060.37|143.29|2728.74|7203.66|2.86|0.00|575.83|143.29|146.15|719.12|721.98|-2585.45| +2451103|82709|2451197|5684|49487|1494913|1492|10871|87709|790511|3455|14834|1|19|12|4|46|23|42|83.16|216.21|30.26|7809.90|1270.92|3492.72|9080.82|50.83|0.00|272.16|1270.92|1321.75|1543.08|1593.91|-2221.80| 
+2451103|82709|2451149|17750|49487|1494913|1492|10871|87709|790511|3455|14834|34|22|6|3|137|23|69|57.14|124.56|39.85|5844.99|2749.65|3942.66|8594.64|27.49|0.00|2320.47|2749.65|2777.14|5070.12|5097.61|-1193.01| +2451103|82709|2451165|13312|49487|1494913|1492|10871|87709|790511|3455|14834|7|19|6|5|21|23|35|76.29|226.58|149.54|2696.40|5233.90|2670.15|7930.30|366.37|0.00|1110.20|5233.90|5600.27|6344.10|6710.47|2563.75| +2451103|82709|2451200|6910|49487|1494913|1492|10871|87709|790511|3455|14834|32|13|20|3|226|23|92|28.87|66.11|57.51|791.20|5290.92|2656.04|6082.12|476.18|0.00|1398.40|5290.92|5767.10|6689.32|7165.50|2634.88| +2451103|82709|2451158|8020|49487|1494913|1492|10871|87709|790511|3455|14834|20|8|9|3|10|23|40|55.03|69.88|69.88|0.00|2795.20|2201.20|2795.20|195.66|0.00|866.40|2795.20|2990.86|3661.60|3857.26|594.00| +2451103|82709|2451178|494|49487|1494913|1492|10871|87709|790511|3455|14834|32|8|16|4|232|23|11|16.33|23.84|20.74|34.10|228.14|179.63|262.24|0.00|0.00|65.56|228.14|228.14|293.70|293.70|48.51| +2451103|82709|2451198|15157|49487|1494913|1492|10871|87709|790511|3455|14834|19|7|5|4|240|23|95|24.25|45.83|4.12|3962.45|391.40|2303.75|4353.85|31.31|0.00|1131.45|391.40|422.71|1522.85|1554.16|-1912.35| +2452102|35718|2452189|141|9167|1219556|4698|9887|29071|1421648|5454|5396|19|3|1|5|269|24|89|72.85|135.50|89.43|4100.23|7959.27|6483.65|12059.50|557.14|0.00|2532.05|7959.27|8516.41|10491.32|11048.46|1475.62| +2452102|35718|2452140|1327|9167|1219556|4698|9887|29071|1421648|5454|5396|59|27|11|5|139|24|61|63.48|128.22|57.69|4302.33|3519.09|3872.28|7821.42|70.38|0.00|3050.00|3519.09|3589.47|6569.09|6639.47|-353.19| +2452102|35718|2452179|247|9167|1219556|4698|9887|29071|1421648|5454|5396|19|25|6|4|172|24|52|57.06|81.02|34.02|2444.00|1769.04|2967.12|4213.04|7.43|1662.89|1853.28|106.15|113.58|1959.43|1966.86|-2860.97| +2452102|35718|2452105|4679|9167|1219556|4698|9887|29071|1421648|5454|5396|5|29|7|4|111|24|92|62.22|138.75|79.08|5489.64|7275.36|5724.24|12765.00|436.52|0.00|1403.92|7275.36|7711.88|8679.28|9115.80|1551.12| +2452102|35718|2452222|13961|9167|1219556|4698|9887|29071|1421648|5454|5396|41|5|5|4|286|24|11|28.90|45.95|11.94|374.11|131.34|317.90|505.45|3.94|0.00|4.95|131.34|135.28|136.29|140.23|-186.56| +2452102|35718|2452214|4113|9167|1219556|4698|9887|29071|1421648|5454|5396|5|29|20|5|277|24|72|14.58|32.22|24.48|557.28|1762.56|1049.76|2319.84|105.75|0.00|69.12|1762.56|1868.31|1831.68|1937.43|712.80| +2452102|35718|2452157|5759|9167|1219556|4698|9887|29071|1421648|5454|5396|15|1|4|5|123|24|70|26.61|54.55|0.00|3818.50|0.00|1862.70|3818.50|0.00|0.00|801.50|0.00|0.00|801.50|801.50|-1862.70| +2452102|35718|2452188|14839|9167|1219556|4698|9887|29071|1421648|5454|5396|9|1|14|2|40|24|94|17.65|48.18|22.64|2400.76|2128.16|1659.10|4528.92|37.24|1596.12|1720.20|532.04|569.28|2252.24|2289.48|-1127.06| +2452102|35718|2452207|5397|9167|1219556|4698|9887|29071|1421648|5454|5396|11|29|1|5|79|24|87|43.68|63.33|23.43|3471.30|2038.41|3800.16|5509.71|11.00|937.66|1873.11|1100.75|1111.75|2973.86|2984.86|-2699.41| +2452102|35718|2452116|17291|9167|1219556|4698|9887|29071|1421648|5454|5396|45|9|17|4|244|24|5|90.67|216.70|82.34|671.80|411.70|453.35|1083.50|12.14|168.79|21.65|242.91|255.05|264.56|276.70|-210.44| +2452102|35718|2452214|12895|9167|1219556|4698|9887|29071|1421648|5454|5396|27|25|5|4|116|24|51|97.78|158.40|128.30|1535.10|6543.30|4986.78|8078.40|588.89|0.00|2261.85|6543.30|7132.19|8805.15|9394.04|1556.52| 
+2452102|35718|2452138|4231|9167|1219556|4698|9887|29071|1421648|5454|5396|13|21|5|1|120|24|31|41.49|90.86|33.61|1774.75|1041.91|1286.19|2816.66|52.09|0.00|27.90|1041.91|1094.00|1069.81|1121.90|-244.28| +2452102|35718|2452111|13283|9167|1219556|4698|9887|29071|1421648|5454|5396|49|25|4|2|30|24|37|21.11|39.89|31.51|310.06|1165.87|781.07|1475.93|69.95|0.00|545.75|1165.87|1235.82|1711.62|1781.57|384.80| +2452102|35718|2452114|2645|9167|1219556|4698|9887|29071|1421648|5454|5396|39|5|3|1|289|24|77|48.80|136.15|34.03|7863.24|2620.31|3757.60|10483.55|235.82|0.00|4193.42|2620.31|2856.13|6813.73|7049.55|-1137.29| +2452377|62495|2452440|10213|47151|1504206|2061|34363|10761|431235|5575|39430|6|6|3|3|63|25|32|19.66|22.21|2.22|639.68|71.04|629.12|710.72|5.68|0.00|42.56|71.04|76.72|113.60|119.28|-558.08| +2452377|62495|2452409|7221|47151|1504206|2061|34363|10761|431235|5575|39430|21|13|20|3|99|25|2|58.24|112.98|76.82|72.32|153.64|116.48|225.96|10.75|0.00|2.24|153.64|164.39|155.88|166.63|37.16| +2452377|62495|2452469|5673|47151|1504206|2061|34363|10761|431235|5575|39430|12|27|2|5|285|25|43|61.06|164.86|112.10|2268.68|4820.30|2625.58|7088.98|0.00|0.00|425.27|4820.30|4820.30|5245.57|5245.57|2194.72| +2452377|62495|2452471|7197|47151|1504206|2061|34363|10761|431235|5575|39430|30|27|3|2|138|25|12|67.06|80.47|64.37|193.20|772.44|804.72|965.64|26.57|108.14|463.44|664.30|690.87|1127.74|1154.31|-140.42| +2452377|62495|2452475|3379|47151|1504206|2061|34363|10761|431235|5575|39430|7|13|17|2|127|25|26|30.08|45.12|24.81|528.06|645.06|782.08|1173.12|29.35|225.77|410.54|419.29|448.64|829.83|859.18|-362.79| +2452377|62495|2452378|5790|47151|1504206|2061|34363|10761|431235|5575|39430|33|3|20|5|190|25|73|50.03|124.57|28.65|7002.16|2091.45|3652.19|9093.61|104.57|0.00|818.33|2091.45|2196.02|2909.78|3014.35|-1560.74| +2452377|62495|2452460|5571|47151|1504206|2061|34363|10761|431235|5575|39430|21|3|6|5|231|25|22|88.96|152.12|3.04|3279.76|66.88|1957.12|3346.64|6.01|0.00|1639.66|66.88|72.89|1706.54|1712.55|-1890.24| +2452377|62495|2452441|13332|47151|1504206|2061|34363|10761|431235|5575|39430|7|1|8|5|232|25|50|57.17|160.07|67.22|4642.50|3361.00|2858.50|8003.50|33.61|0.00|1440.50|3361.00|3394.61|4801.50|4835.11|502.50| +2452377|62495|2452452|4761|47151|1504206|2061|34363|10761|431235|5575|39430|13|30|16|4|295|25|93|88.00|150.48|109.85|3778.59|10216.05|8184.00|13994.64|0.00|0.00|2658.87|10216.05|10216.05|12874.92|12874.92|2032.05| +2452377|62495|2452437|4549|47151|1504206|2061|34363|10761|431235|5575|39430|36|3|16|2|81|25|29|68.48|169.14|3.38|4807.04|98.02|1985.92|4905.06|1.00|81.35|931.77|16.67|17.67|948.44|949.44|-1969.25| +2452377|62495|2452441|4833|47151|1504206|2061|34363|10761|431235|5575|39430|49|25|1|1|134|25|78|82.00|89.38|54.52|2719.08|4252.56|6396.00|6971.64|212.62|0.00|2369.64|4252.56|4465.18|6622.20|6834.82|-2143.44| +2452377|62495|2452425|7099|47151|1504206|2061|34363|10761|431235|5575|39430|24|21|13|5|204|25|47|60.04|144.69|34.72|5168.59|1631.84|2821.88|6800.43|130.54|0.00|1359.71|1631.84|1762.38|2991.55|3122.09|-1190.04| +2452377|62495|2452483|5472|47151|1504206|2061|34363|10761|431235|5575|39430|51|30|14|3|252|25|1|78.44|228.26|15.97|212.29|15.97|78.44|228.26|1.18|1.11|95.86|14.86|16.04|110.72|111.90|-63.58| +2451287|82606|2451312|3586|95407|970818|2582|4346|54186|951760|2438|21402|2|2|12|2|32|26|81|22.29|40.79|33.03|628.56|2675.43|1805.49|3303.99|80.26|0.00|1090.26|2675.43|2755.69|3765.69|3845.95|869.94| 
+2451287|82606|2451303|1528|95407|970818|2582|4346|54186|951760|2438|21402|56|7|9|4|169|26|92|38.04|101.94|61.16|3751.76|5626.72|3499.68|9378.48|56.26|0.00|3938.52|5626.72|5682.98|9565.24|9621.50|2127.04| +2451287|82606|2451343|2494|95407|970818|2582|4346|54186|951760|2438|21402|25|8|7|4|228|26|55|50.21|55.73|16.16|2176.35|888.80|2761.55|3065.15|35.55|0.00|1501.50|888.80|924.35|2390.30|2425.85|-1872.75| +2451287|82606|2451355|1042|95407|970818|2582|4346|54186|951760|2438|21402|49|4|14|5|96|26|85|71.87|176.80|14.14|13826.10|1201.90|6108.95|15028.00|48.07|0.00|6160.80|1201.90|1249.97|7362.70|7410.77|-4907.05| +2451287|82606|2451295|5878|95407|970818|2582|4346|54186|951760|2438|21402|19|16|5|4|140|26|31|64.70|84.75|55.93|893.42|1733.83|2005.70|2627.25|104.02|0.00|604.19|1733.83|1837.85|2338.02|2442.04|-271.87| +2451287|82606|2451304|8710|95407|970818|2582|4346|54186|951760|2438|21402|16|1|10|2|171|26|34|56.10|141.37|52.30|3028.38|1778.20|1907.40|4806.58|34.14|640.15|1393.66|1138.05|1172.19|2531.71|2565.85|-769.35| +2451287|82606|2451360|13885|95407|970818|2582|4346|54186|951760|2438|21402|2|28|8|4|257|26|79|13.35|18.28|10.23|635.95|808.17|1054.65|1444.12|0.00|783.92|173.01|24.25|24.25|197.26|197.26|-1030.40| +2451287|82606|2451395|3788|95407|970818|2582|4346|54186|951760|2438|21402|2|14|14|5|176|26|15|56.43|62.07|41.58|307.35|623.70|846.45|931.05|18.21|168.39|102.30|455.31|473.52|557.61|575.82|-391.14| +2451287|82606|2451392|17788|95407|970818|2582|4346|54186|951760|2438|21402|49|8|5|5|275|26|6|33.83|99.12|25.77|440.10|154.62|202.98|594.72|12.36|0.00|285.42|154.62|166.98|440.04|452.40|-48.36| +2451287|82606|2451399|2090|95407|970818|2582|4346|54186|951760|2438|21402|46|26|1|1|208|26|69|88.19|179.90|8.99|11792.79|620.31|6085.11|12413.10|6.20|0.00|2854.53|620.31|626.51|3474.84|3481.04|-5464.80| +2451287|82606|2451405|10462|95407|970818|2582|4346|54186|951760|2438|21402|58|2|11|3|82|26|12|16.74|43.69|13.54|361.80|162.48|200.88|524.28|9.74|0.00|230.64|162.48|172.22|393.12|402.86|-38.40| +2451287|82606|2451362|4612|95407|970818|2582|4346|54186|951760|2438|21402|52|28|20|3|263|26|3|89.46|104.66|36.63|204.09|109.89|268.38|313.98|9.89|0.00|119.31|109.89|119.78|229.20|239.09|-158.49| +2451287|82606|2451407|15325|95407|970818|2582|4346|54186|951760|2438|21402|1|28|12|4|283|26|56|42.81|65.92|36.25|1661.52|2030.00|2397.36|3691.52|20.30|0.00|1291.92|2030.00|2050.30|3321.92|3342.22|-367.36| +2451287|82606|2451342|16459|95407|970818|2582|4346|54186|951760|2438|21402|32|16|8|5|149|26|54|67.29|91.51|90.59|49.68|4891.86|3633.66|4941.54|195.67|0.00|1531.44|4891.86|5087.53|6423.30|6618.97|1258.20| +2451287|82606|2451317|14662|95407|970818|2582|4346|54186|951760|2438|21402|55|20|17|3|246|26|36|47.37|132.16|9.25|4424.76|333.00|1705.32|4757.76|26.64|0.00|1950.48|333.00|359.64|2283.48|2310.12|-1372.32| +2451287|82606|2451306|5942|95407|970818|2582|4346|54186|951760|2438|21402|13|28|10|3|173|26|44|62.57|162.05|9.72|6702.52|427.68|2753.08|7130.20|17.10|0.00|213.84|427.68|444.78|641.52|658.62|-2325.40| +2450884|35444|2450962|8684|31979|1234440|4901|13715|46998|1484245|5851|33912|52|28|13|4|182|27|33|94.47|225.78|56.44|5588.22|1862.52|3117.51|7450.74|55.13|484.25|1191.96|1378.27|1433.40|2570.23|2625.36|-1739.24| +2450884|35444|2450948|10546|31979|1234440|4901|13715|46998|1484245|5851|33912|50|4|17|4|278|27|2|42.83|116.92|12.86|208.12|25.72|85.66|233.84|2.31|0.00|56.12|25.72|28.03|81.84|84.15|-59.94| 
+2450884|35444|2450918|14170|31979|1234440|4901|13715|46998|1484245|5851|33912|20|8|9|3|197|27|77|28.35|44.22|2.21|3234.77|170.17|2182.95|3404.94|0.00|0.00|135.52|170.17|170.17|305.69|305.69|-2012.78| +2450884|35444|2450928|12086|31979|1234440|4901|13715|46998|1484245|5851|33912|56|4|14|5|59|27|11|27.78|36.94|4.80|353.54|52.80|305.58|406.34|4.75|0.00|186.89|52.80|57.55|239.69|244.44|-252.78| +2450884|35444|2450947|11474|31979|1234440|4901|13715|46998|1484245|5851|33912|22|4|14|3|29|27|15|47.22|76.49|64.25|183.60|963.75|708.30|1147.35|0.00|0.00|195.00|963.75|963.75|1158.75|1158.75|255.45| +2450884|35444|2450888|6254|31979|1234440|4901|13715|46998|1484245|5851|33912|31|20|9|4|259|27|36|8.88|15.00|11.70|118.80|421.20|319.68|540.00|25.27|0.00|178.20|421.20|446.47|599.40|624.67|101.52| +2450884|35444|2450937|8206|31979|1234440|4901|13715|46998|1484245|5851|33912|14|13|6|3|207|27|86|71.43|127.14|16.52|9513.32|1420.72|6142.98|10934.04|28.41|0.00|983.84|1420.72|1449.13|2404.56|2432.97|-4722.26| +2450884|35444|2450889|2260|31979|1234440|4901|13715|46998|1484245|5851|33912|13|1|5|2|246|27|61|21.60|28.29|25.17|190.32|1535.37|1317.60|1725.69|15.35|0.00|275.72|1535.37|1550.72|1811.09|1826.44|217.77| +2450884|35444|2450940|12544|31979|1234440|4901|13715|46998|1484245|5851|33912|8|26|3|5|4|27|25|49.73|70.61|34.59|900.50|864.75|1243.25|1765.25|34.59|0.00|229.25|864.75|899.34|1094.00|1128.59|-378.50| +2450884|35444|2450984|13010|31979|1234440|4901|13715|46998|1484245|5851|33912|2|22|9|4|266|27|49|32.26|71.93|69.05|141.12|3383.45|1580.74|3524.57|23.68|1015.03|140.63|2368.42|2392.10|2509.05|2532.73|787.68| +2450884|35444|2450914|2648|31979|1234440|4901|13715|46998|1484245|5851|33912|22|13|2|5|225|27|51|30.34|39.13|13.69|1297.44|698.19|1547.34|1995.63|55.85|0.00|0.00|698.19|754.04|698.19|754.04|-849.15| +2450884|35444|2450887|17770|31979|1234440|4901|13715|46998|1484245|5851|33912|56|28|1|2|3|27|67|35.92|65.01|5.85|3963.72|391.95|2406.64|4355.67|7.83|0.00|1175.85|391.95|399.78|1567.80|1575.63|-2014.69| +2450884|35444|2450927|9118|31979|1234440|4901|13715|46998|1484245|5851|33912|8|13|12|4|216|27|66|68.00|85.68|40.26|2997.72|2657.16|4488.00|5654.88|239.14|0.00|678.48|2657.16|2896.30|3335.64|3574.78|-1830.84| +2450884|35444|2450929|7588|31979|1234440|4901|13715|46998|1484245|5851|33912|25|1|16|4|2|27|11|4.36|10.07|2.21|86.46|24.31|47.96|110.77|1.21|0.00|6.60|24.31|25.52|30.91|32.12|-23.65| +2450884|35444|2450973|17959|31979|1234440|4901|13715|46998|1484245|5851|33912|8|22|16|5|294|27|14|21.94|56.60|35.09|301.14|491.26|307.16|792.40|44.21|0.00|118.86|491.26|535.47|610.12|654.33|184.10| +2450884|35444|2450952|11053|31979|1234440|4901|13715|46998|1484245|5851|33912|4|26|15|3|234|27|14|24.32|51.55|38.66|180.46|541.24|340.48|721.70|48.71|0.00|180.32|541.24|589.95|721.56|770.27|200.76| +2451039|62333|2451050|11522|24997|871580|6163|36746|10052|1758318|4848|35431|7|19|16|2|298|28|30|66.66|189.98|182.38|228.00|5471.40|1999.80|5699.40|273.57|0.00|56.70|5471.40|5744.97|5528.10|5801.67|3471.60| +2451039|62333|2451062|11197|24997|871580|6163|36746|10052|1758318|4848|35431|16|10|6|3|260|28|33|44.85|64.13|28.21|1185.36|930.93|1480.05|2116.29|83.78|0.00|169.29|930.93|1014.71|1100.22|1184.00|-549.12| +2451039|62333|2451109|17164|24997|871580|6163|36746|10052|1758318|4848|35431|1|26|10|1|69|28|56|85.54|148.83|104.18|2500.40|5834.08|4790.24|8334.48|233.36|0.00|1833.44|5834.08|6067.44|7667.52|7900.88|1043.84| 
+2451039|62333|2451113|4058|24997|871580|6163|36746|10052|1758318|4848|35431|43|14|14|2|183|28|63|15.93|30.90|27.19|233.73|1712.97|1003.59|1946.70|137.03|0.00|856.17|1712.97|1850.00|2569.14|2706.17|709.38| +2451039|62333|2451155|11881|24997|871580|6163|36746|10052|1758318|4848|35431|25|2|11|5|209|28|28|84.48|119.11|39.30|2234.68|1100.40|2365.44|3335.08|55.02|0.00|800.24|1100.40|1155.42|1900.64|1955.66|-1265.04| +2451039|62333|2451043|3721|24997|871580|6163|36746|10052|1758318|4848|35431|58|7|18|2|256|28|89|97.81|97.81|86.07|1044.86|7660.23|8705.09|8705.09|536.21|0.00|1914.39|7660.23|8196.44|9574.62|10110.83|-1044.86| +2451039|62333|2451155|7190|24997|871580|6163|36746|10052|1758318|4848|35431|46|19|19|5|14|28|1|99.95|175.91|66.84|109.07|66.84|99.95|175.91|0.19|47.45|33.42|19.39|19.58|52.81|53.00|-80.56| +2451039|62333|2451122|286|24997|871580|6163|36746|10052|1758318|4848|35431|26|20|15|2|219|28|39|62.06|73.23|8.78|2513.55|342.42|2420.34|2855.97|0.00|219.14|771.03|123.28|123.28|894.31|894.31|-2297.06| +2451039|62333|2451074|1519|24997|871580|6163|36746|10052|1758318|4848|35431|14|28|12|2|269|28|34|71.14|165.75|109.39|1916.24|3719.26|2418.76|5635.50|0.00|0.00|56.10|3719.26|3719.26|3775.36|3775.36|1300.50| +2451039|62333|2451118|9403|24997|871580|6163|36746|10052|1758318|4848|35431|38|22|4|3|235|28|74|74.71|132.98|117.02|1181.04|8659.48|5528.54|9840.52|173.18|0.00|196.10|8659.48|8832.66|8855.58|9028.76|3130.94| +2451039|62333|2451087|5353|24997|871580|6163|36746|10052|1758318|4848|35431|56|20|16|1|33|28|75|45.15|130.93|60.22|5303.25|4516.50|3386.25|9819.75|0.00|0.00|687.00|4516.50|4516.50|5203.50|5203.50|1130.25| +2451039|62333|2451048|13004|24997|871580|6163|36746|10052|1758318|4848|35431|19|26|18|2|93|28|25|45.99|57.48|27.01|761.75|675.25|1149.75|1437.00|54.02|0.00|43.00|675.25|729.27|718.25|772.27|-474.50| +2452220|41441|2452320|11879|57028|556763|1298|3033|18962|1496846|5391|47079|17|13|12|2|45|29|91|39.18|64.25|34.69|2689.96|3156.79|3565.38|5846.75|220.97|0.00|1695.33|3156.79|3377.76|4852.12|5073.09|-408.59| +2452220|41441|2452314|17021|57028|556763|1298|3033|18962|1496846|5391|47079|15|7|20|3|239|29|37|60.58|164.77|74.14|3353.31|2743.18|2241.46|6096.49|1.64|2578.58|1340.88|164.60|166.24|1505.48|1507.12|-2076.86| +2452220|41441|2452229|4193|57028|556763|1298|3033|18962|1496846|5391|47079|31|21|19|2|95|29|82|23.49|50.97|45.36|460.02|3719.52|1926.18|4179.54|185.97|0.00|208.28|3719.52|3905.49|3927.80|4113.77|1793.34| +2452220|41441|2452281|13269|57028|556763|1298|3033|18962|1496846|5391|47079|25|9|17|2|244|29|55|34.69|39.19|36.44|151.25|2004.20|1907.95|2155.45|27.05|1463.06|172.15|541.14|568.19|713.29|740.34|-1366.81| +2452220|41441|2452285|12889|57028|556763|1298|3033|18962|1496846|5391|47079|53|3|17|5|220|29|49|22.48|56.64|24.35|1582.21|1193.15|1101.52|2775.36|107.38|0.00|999.11|1193.15|1300.53|2192.26|2299.64|91.63| +2452220|41441|2452279|12911|57028|556763|1298|3033|18962|1496846|5391|47079|5|15|20|5|120|29|89|97.71|276.51|218.44|5168.23|19441.16|8696.19|24609.39|0.00|0.00|2460.85|19441.16|19441.16|21902.01|21902.01|10744.97| +2452220|41441|2452256|5731|57028|556763|1298|3033|18962|1496846|5391|47079|3|29|11|4|107|29|56|96.93|107.59|71.00|2049.04|3976.00|5428.08|6025.04|155.06|874.72|1204.56|3101.28|3256.34|4305.84|4460.90|-2326.80| +2452220|41441|2452337|3275|57028|556763|1298|3033|18962|1496846|5391|47079|3|5|7|3|262|29|27|73.21|201.32|16.10|5000.94|434.70|1976.67|5435.64|13.04|0.00|652.05|434.70|447.74|1086.75|1099.79|-1541.97| 
+2452220|41441|2452276|12777|57028|556763|1298|3033|18962|1496846|5391|47079|33|29|16|2|115|29|87|97.21|228.44|43.40|16098.48|3775.80|8457.27|19874.28|226.54|0.00|7353.24|3775.80|4002.34|11129.04|11355.58|-4681.47| +2452220|41441|2452282|2765|57028|556763|1298|3033|18962|1496846|5391|47079|9|11|7|3|279|29|9|95.82|248.17|213.42|312.75|1920.78|862.38|2233.53|172.87|0.00|960.39|1920.78|2093.65|2881.17|3054.04|1058.40| +2451150|41465|2451252|13568|73211|866401|5991|7181|36734|1886542|798|2788|1|19|6|2|21|30|98|40.69|90.33|56.90|3276.14|5576.20|3987.62|8852.34|111.52|0.00|1061.34|5576.20|5687.72|6637.54|6749.06|1588.58| +2451150|41465|2451178|9163|73211|866401|5991|7181|36734|1886542|798|2788|28|7|3|2|241|30|83|76.03|146.73|111.51|2923.26|9255.33|6310.49|12178.59|283.21|6108.51|1460.80|3146.82|3430.03|4607.62|4890.83|-3163.67| +2451150|41465|2451255|16190|73211|866401|5991|7181|36734|1886542|798|2788|22|26|3|5|267|30|35|8.53|12.53|10.90|57.05|381.50|298.55|438.55|34.33|0.00|12.95|381.50|415.83|394.45|428.78|82.95| +2451150|41465|2451192|5546|73211|866401|5991|7181|36734|1886542|798|2788|43|26|13|5|147|30|34|91.37|161.72|9.70|5168.68|329.80|3106.58|5498.48|9.89|0.00|824.50|329.80|339.69|1154.30|1164.19|-2776.78| +2451150|41465|2451184|4009|73211|866401|5991|7181|36734|1886542|798|2788|28|4|14|5|253|30|85|17.11|35.41|8.85|2257.60|752.25|1454.35|3009.85|6.92|406.21|270.30|346.04|352.96|616.34|623.26|-1108.31| +2451150|41465|2451198|9025|73211|866401|5991|7181|36734|1886542|798|2788|4|19|14|1|105|30|50|97.54|258.48|18.09|12019.50|904.50|4877.00|12924.00|54.27|0.00|1550.50|904.50|958.77|2455.00|2509.27|-3972.50| +2451150|41465|2451257|9368|73211|866401|5991|7181|36734|1886542|798|2788|8|25|11|1|134|30|71|39.71|41.29|12.79|2023.50|908.09|2819.41|2931.59|81.72|0.00|1406.51|908.09|989.81|2314.60|2396.32|-1911.32| +2451150|41465|2451235|7987|73211|866401|5991|7181|36734|1886542|798|2788|43|10|20|4|228|30|37|81.68|93.11|42.83|1860.36|1584.71|3022.16|3445.07|79.23|0.00|413.29|1584.71|1663.94|1998.00|2077.23|-1437.45| +2451150|41465|2451187|4868|73211|866401|5991|7181|36734|1886542|798|2788|49|22|16|3|160|30|73|63.15|153.45|104.34|3585.03|7616.82|4609.95|11201.85|457.00|0.00|3584.30|7616.82|8073.82|11201.12|11658.12|3006.87| +2451150|41465|2451240|3460|73211|866401|5991|7181|36734|1886542|798|2788|8|22|12|4|289|30|14|14.06|16.59|3.64|181.30|50.96|196.84|232.26|0.15|45.86|88.20|5.10|5.25|93.30|93.45|-191.74| +2451037|54024|2451081|13742|29544|1314548|5952|34556|18322|1519490|6768|24572|40|4|15|2|39|31|10|1.41|3.10|1.67|14.30|16.70|14.10|31.00|0.00|0.00|10.20|16.70|16.70|26.90|26.90|2.60| +2451037|54024|2451154|9745|29544|1314548|5952|34556|18322|1519490|6768|24572|44|22|4|3|152|31|66|97.80|226.89|176.97|3294.72|11680.02|6454.80|14974.74|467.20|0.00|6588.78|11680.02|12147.22|18268.80|18736.00|5225.22| +2451037|54024|2451130|15829|29544|1314548|5952|34556|18322|1519490|6768|24572|56|22|15|3|254|31|27|37.83|101.00|9.09|2481.57|245.43|1021.41|2727.00|2.74|211.06|1090.80|34.37|37.11|1125.17|1127.91|-987.04| +2451037|54024|2451095|16804|29544|1314548|5952|34556|18322|1519490|6768|24572|40|28|19|3|207|31|94|42.95|113.81|73.97|3744.96|6953.18|4037.30|10698.14|66.75|3615.65|3209.16|3337.53|3404.28|6546.69|6613.44|-699.77| +2451037|54024|2451139|9320|29544|1314548|5952|34556|18322|1519490|6768|24572|34|26|20|4|265|31|34|85.40|245.09|117.64|4333.30|3999.76|2903.60|8333.06|279.98|0.00|3749.86|3999.76|4279.74|7749.62|8029.60|1096.16| 
+2451037|54024|2451065|8722|29544|1314548|5952|34556|18322|1519490|6768|24572|16|10|16|3|103|31|33|59.36|163.24|163.24|0.00|5386.92|1958.88|5386.92|323.21|0.00|269.28|5386.92|5710.13|5656.20|5979.41|3428.04| +2451037|54024|2451092|16910|29544|1314548|5952|34556|18322|1519490|6768|24572|37|10|16|1|126|31|69|13.02|28.12|12.65|1067.43|872.85|898.38|1940.28|24.78|253.12|115.92|619.73|644.51|735.65|760.43|-278.65| +2451037|54024|2451150|12733|29544|1314548|5952|34556|18322|1519490|6768|24572|58|22|11|3|115|31|68|16.39|39.33|6.68|2220.20|454.24|1114.52|2674.44|18.16|0.00|801.72|454.24|472.40|1255.96|1274.12|-660.28| +2451037|54024|2451117|7844|29544|1314548|5952|34556|18322|1519490|6768|24572|22|26|9|1|216|31|21|88.24|231.18|152.57|1650.81|3203.97|1853.04|4854.78|0.00|0.00|339.78|3203.97|3203.97|3543.75|3543.75|1350.93| +2451037|54024|2451104|13897|29544|1314548|5952|34556|18322|1519490|6768|24572|28|13|6|3|27|31|37|72.48|118.14|115.77|87.69|4283.49|2681.76|4371.18|133.64|2056.07|261.96|2227.42|2361.06|2489.38|2623.02|-454.34| +2451037|54024|2451132|13141|29544|1314548|5952|34556|18322|1519490|6768|24572|2|19|18|1|64|31|48|32.40|97.20|89.42|373.44|4292.16|1555.20|4665.60|128.76|0.00|792.96|4292.16|4420.92|5085.12|5213.88|2736.96| +2451037|54024|2451125|3916|29544|1314548|5952|34556|18322|1519490|6768|24572|20|1|1|1|241|31|20|7.63|10.22|4.29|118.60|85.80|152.60|204.40|6.00|0.00|94.00|85.80|91.80|179.80|185.80|-66.80| +2451037|54024|2451110|15034|29544|1314548|5952|34556|18322|1519490|6768|24572|8|13|2|4|66|31|24|92.87|157.87|0.00|3788.88|0.00|2228.88|3788.88|0.00|0.00|1742.88|0.00|0.00|1742.88|1742.88|-2228.88| +2451379|64549|2451383|8041|67273|1005447|3621|28548|7907|1111442|1023|4797|19|26|15|1|130|32|89|28.90|60.69|57.65|270.56|5130.85|2572.10|5401.41|102.61|0.00|1566.40|5130.85|5233.46|6697.25|6799.86|2558.75| +2451379|64549|2451470|2894|67273|1005447|3621|28548|7907|1111442|1023|4797|4|4|3|1|116|32|68|38.55|67.07|30.18|2508.52|2052.24|2621.40|4560.76|61.56|0.00|2188.92|2052.24|2113.80|4241.16|4302.72|-569.16| +2451379|64549|2451395|12158|67273|1005447|3621|28548|7907|1111442|1023|4797|14|22|13|2|68|32|19|28.69|81.76|23.71|1102.95|450.49|545.11|1553.44|18.01|0.00|776.72|450.49|468.50|1227.21|1245.22|-94.62| +2451379|64549|2451482|16754|67273|1005447|3621|28548|7907|1111442|1023|4797|10|4|20|2|114|32|6|82.63|122.29|100.27|132.12|601.62|495.78|733.74|12.03|0.00|300.78|601.62|613.65|902.40|914.43|105.84| +2451379|64549|2451496|14227|67273|1005447|3621|28548|7907|1111442|1023|4797|43|13|15|5|128|32|22|51.82|123.84|108.97|327.14|2397.34|1140.04|2724.48|95.89|0.00|980.76|2397.34|2493.23|3378.10|3473.99|1257.30| +2451379|64549|2451442|3043|67273|1005447|3621|28548|7907|1111442|1023|4797|7|22|3|4|262|32|25|55.48|159.22|156.03|79.75|3900.75|1387.00|3980.50|351.06|0.00|875.50|3900.75|4251.81|4776.25|5127.31|2513.75| +2451379|64549|2451471|2078|67273|1005447|3621|28548|7907|1111442|1023|4797|20|14|8|5|202|32|85|86.33|224.45|181.80|3625.25|15453.00|7338.05|19078.25|199.34|8808.21|8775.40|6644.79|6844.13|15420.19|15619.53|-693.26| +2451379|64549|2451414|4274|67273|1005447|3621|28548|7907|1111442|1023|4797|43|26|6|3|50|32|40|38.71|99.48|42.77|2268.40|1710.80|1548.40|3979.20|17.10|0.00|1949.60|1710.80|1727.90|3660.40|3677.50|162.40| +2451379|64549|2451478|14438|67273|1005447|3621|28548|7907|1111442|1023|4797|1|8|16|2|1|32|81|82.74|226.70|36.27|15424.83|2937.87|6701.94|18362.70|235.02|0.00|7528.14|2937.87|3172.89|10466.01|10701.03|-3764.07| 
+2451379|64549|2451497|3046|67273|1005447|3621|28548|7907|1111442|1023|4797|2|7|3|5|227|32|40|59.52|94.63|67.18|1098.00|2687.20|2380.80|3785.20|80.61|0.00|1703.20|2687.20|2767.81|4390.40|4471.01|306.40| +2451379|64549|2451428|1933|67273|1005447|3621|28548|7907|1111442|1023|4797|55|22|13|4|248|32|3|24.49|29.14|4.95|72.57|14.85|73.47|87.42|0.29|0.00|19.23|14.85|15.14|34.08|34.37|-58.62| +2451379|64549|2451462|15937|67273|1005447|3621|28548|7907|1111442|1023|4797|4|13|11|5|169|32|37|92.05|273.38|49.20|8294.66|1820.40|3405.85|10115.06|163.83|0.00|1213.60|1820.40|1984.23|3034.00|3197.83|-1585.45| +2451379|64549|2451391|11521|67273|1005447|3621|28548|7907|1111442|1023|4797|56|10|3|5|117|32|54|48.67|123.13|102.19|1130.76|5518.26|2628.18|6649.02|386.27|0.00|1130.22|5518.26|5904.53|6648.48|7034.75|2890.08| +2451517|31608|2451570|8431|88444|869059|5674|16336|33131|1695169|4228|25643|20|1|1|3|218|33|86|95.64|159.71|71.86|7555.10|6179.96|8225.04|13735.06|308.99|0.00|5905.62|6179.96|6488.95|12085.58|12394.57|-2045.08| +2451517|31608|2451536|12926|88444|869059|5674|16336|33131|1695169|4228|25643|37|10|20|4|78|33|3|23.57|31.34|14.10|51.72|42.30|70.71|94.02|2.53|0.00|5.64|42.30|44.83|47.94|50.47|-28.41| +2451517|31608|2451576|16490|88444|869059|5674|16336|33131|1695169|4228|25643|44|25|16|4|112|33|31|98.28|283.04|246.24|1140.80|7633.44|3046.68|8774.24|152.66|0.00|1666.87|7633.44|7786.10|9300.31|9452.97|4586.76| +2451517|31608|2451612|7090|88444|869059|5674|16336|33131|1695169|4228|25643|31|4|20|4|151|33|26|58.85|73.56|38.98|899.08|1013.48|1530.10|1912.56|60.80|0.00|382.46|1013.48|1074.28|1395.94|1456.74|-516.62| +2451517|31608|2451582|5407|88444|869059|5674|16336|33131|1695169|4228|25643|28|7|13|2|211|33|21|83.48|170.29|103.87|1394.82|2181.27|1753.08|3576.09|65.43|0.00|1179.99|2181.27|2246.70|3361.26|3426.69|428.19| +2451517|31608|2451631|1807|88444|869059|5674|16336|33131|1695169|4228|25643|16|1|20|2|201|33|87|99.48|239.74|33.56|17937.66|2919.72|8654.76|20857.38|184.52|613.14|10428.69|2306.58|2491.10|12735.27|12919.79|-6348.18| +2451517|31608|2451566|17581|88444|869059|5674|16336|33131|1695169|4228|25643|20|7|10|3|179|33|84|12.17|33.22|12.62|1730.40|1060.08|1022.28|2790.48|63.60|0.00|641.76|1060.08|1123.68|1701.84|1765.44|37.80| +2451517|31608|2451570|3673|88444|869059|5674|16336|33131|1695169|4228|25643|10|26|15|3|241|33|11|79.96|188.70|7.54|1992.76|82.94|879.56|2075.70|2.48|0.00|498.08|82.94|85.42|581.02|583.50|-796.62| +2451517|31608|2451583|14282|88444|869059|5674|16336|33131|1695169|4228|25643|37|28|11|2|176|33|7|36.32|70.46|54.95|108.57|384.65|254.24|493.22|3.84|0.00|177.52|384.65|388.49|562.17|566.01|130.41| +2451517|31608|2451519|3955|88444|869059|5674|16336|33131|1695169|4228|25643|13|25|16|2|135|33|65|73.89|191.37|99.51|5970.90|6468.15|4802.85|12439.05|517.45|0.00|3233.75|6468.15|6985.60|9701.90|10219.35|1665.30| +2451517|31608|2451610|9076|88444|869059|5674|16336|33131|1695169|4228|25643|10|7|2|5|208|33|57|98.95|131.60|121.07|600.21|6900.99|5640.15|7501.20|621.08|0.00|2550.18|6900.99|7522.07|9451.17|10072.25|1260.84| +2451517|31608|2451621|7544|88444|869059|5674|16336|33131|1695169|4228|25643|46|19|16|2|118|33|8|50.93|58.56|43.92|117.12|351.36|407.44|468.48|3.51|0.00|4.64|351.36|354.87|356.00|359.51|-56.08| +2451328|78353|2451355|5629|56679|1142880|1081|39282|47435|1683458|1885|16086|7|25|2|1|70|34|5|85.54|100.93|52.48|242.25|262.40|427.70|504.65|13.12|0.00|80.70|262.40|275.52|343.10|356.22|-165.30| 
+2451328|78353|2451388|1348|56679|1142880|1081|39282|47435|1683458|1885|16086|26|2|17|5|213|34|41|28.24|78.78|41.75|1518.23|1711.75|1157.84|3229.98|17.11|0.00|1130.37|1711.75|1728.86|2842.12|2859.23|553.91| +2451328|78353|2451367|2266|56679|1142880|1081|39282|47435|1683458|1885|16086|19|10|15|4|93|34|77|66.31|174.39|125.56|3759.91|9668.12|5105.87|13428.03|386.72|0.00|6176.17|9668.12|10054.84|15844.29|16231.01|4562.25| +2451328|78353|2451401|7195|56679|1142880|1081|39282|47435|1683458|1885|16086|40|7|4|2|110|34|96|41.23|77.92|77.14|74.88|7405.44|3958.08|7480.32|148.10|0.00|3665.28|7405.44|7553.54|11070.72|11218.82|3447.36| +2451328|78353|2451420|13102|56679|1142880|1081|39282|47435|1683458|1885|16086|38|8|3|3|216|34|94|18.44|25.81|10.84|1407.18|1018.96|1733.36|2426.14|10.18|0.00|581.86|1018.96|1029.14|1600.82|1611.00|-714.40| +2451328|78353|2451421|7993|56679|1142880|1081|39282|47435|1683458|1885|16086|10|7|2|5|141|34|55|71.17|196.42|143.38|2917.20|7885.90|3914.35|10803.10|315.43|0.00|431.75|7885.90|8201.33|8317.65|8633.08|3971.55| +2451328|78353|2451386|13804|56679|1142880|1081|39282|47435|1683458|1885|16086|46|19|9|5|201|34|52|52.68|150.66|75.33|3917.16|3917.16|2739.36|7834.32|9.40|3760.47|1174.68|156.69|166.09|1331.37|1340.77|-2582.67| +2451328|78353|2451355|136|56679|1142880|1081|39282|47435|1683458|1885|16086|1|28|9|5|214|34|48|59.41|141.39|57.96|4004.64|2782.08|2851.68|6786.72|111.28|0.00|610.56|2782.08|2893.36|3392.64|3503.92|-69.60| +2451328|78353|2451400|8824|56679|1142880|1081|39282|47435|1683458|1885|16086|37|13|15|3|241|34|26|90.63|265.54|249.60|414.44|6489.60|2356.38|6904.04|124.60|259.58|2209.22|6230.02|6354.62|8439.24|8563.84|3873.64| +2451328|78353|2451406|4708|56679|1142880|1081|39282|47435|1683458|1885|16086|20|8|19|3|47|34|70|81.95|117.18|17.57|6972.70|1229.90|5736.50|8202.60|110.69|0.00|574.00|1229.90|1340.59|1803.90|1914.59|-4506.60| +2451328|78353|2451409|15313|56679|1142880|1081|39282|47435|1683458|1885|16086|16|2|12|5|235|34|29|31.69|62.74|55.21|218.37|1601.09|919.01|1819.46|0.00|0.00|873.19|1601.09|1601.09|2474.28|2474.28|682.08| +2451328|78353|2451417|10262|56679|1142880|1081|39282|47435|1683458|1885|16086|19|10|2|3|241|34|15|88.55|170.90|54.68|1743.30|820.20|1328.25|2563.50|65.61|0.00|640.80|820.20|885.81|1461.00|1526.61|-508.05| +2451328|78353|2451415|14864|56679|1142880|1081|39282|47435|1683458|1885|16086|13|14|19|2|108|34|98|84.37|165.36|71.10|9237.48|6967.80|8268.26|16205.28|124.02|766.45|486.08|6201.35|6325.37|6687.43|6811.45|-2066.91| +2451889|42440|2451936|12247|90495|1378316|3290|31464|32170|1831235|5680|41854|50|13|15|3|144|35|82|48.03|73.96|22.92|4185.28|1879.44|3938.46|6064.72|18.79|0.00|2789.64|1879.44|1898.23|4669.08|4687.87|-2059.02| +2451889|42440|2451938|4739|90495|1378316|3290|31464|32170|1831235|5680|41854|43|2|2|2|62|35|3|88.91|130.69|3.92|380.31|11.76|266.73|392.07|0.35|0.00|137.22|11.76|12.11|148.98|149.33|-254.97| +2451889|42440|2451892|13304|90495|1378316|3290|31464|32170|1831235|5680|41854|25|26|15|1|257|35|10|25.49|52.76|12.13|406.30|121.30|254.90|527.60|2.42|0.00|58.00|121.30|123.72|179.30|181.72|-133.60| +2451889|42440|2451922|12923|90495|1378316|3290|31464|32170|1831235|5680|41854|14|2|16|2|81|35|15|29.09|73.59|11.03|938.40|165.45|436.35|1103.85|3.07|62.87|132.45|102.58|105.65|235.03|238.10|-333.77| +2451889|42440|2451982|17189|90495|1378316|3290|31464|32170|1831235|5680|41854|14|5|18|1|281|35|32|84.10|206.04|30.90|5604.48|988.80|2691.20|6593.28|79.10|0.00|725.12|988.80|1067.90|1713.92|1793.02|-1702.40| 
+2451889|42440|2451906|9506|90495|1378316|3290|31464|32170|1831235|5680|41854|41|5|18|3|138|35|37|70.28|200.29|104.15|3557.18|3853.55|2600.36|7410.73|38.53|0.00|222.00|3853.55|3892.08|4075.55|4114.08|1253.19| +2451889|42440|2451952|16405|90495|1378316|3290|31464|32170|1831235|5680|41854|53|8|16|2|135|35|95|13.14|29.82|19.08|1020.30|1812.60|1248.30|2832.90|36.25|0.00|1217.90|1812.60|1848.85|3030.50|3066.75|564.30| +2451889|42440|2451944|10424|90495|1378316|3290|31464|32170|1831235|5680|41854|53|23|20|5|80|35|82|18.11|34.77|3.47|2566.60|284.54|1485.02|2851.14|8.53|0.00|1197.20|284.54|293.07|1481.74|1490.27|-1200.48| +2451889|42440|2451983|13165|90495|1378316|3290|31464|32170|1831235|5680|41854|8|19|18|3|131|35|16|77.39|152.45|44.21|1731.84|707.36|1238.24|2439.20|0.00|0.00|341.44|707.36|707.36|1048.80|1048.80|-530.88| +2451889|42440|2451965|11816|90495|1378316|3290|31464|32170|1831235|5680|41854|50|11|16|1|96|35|65|8.32|20.96|11.73|599.95|762.45|540.80|1362.40|0.00|0.00|558.35|762.45|762.45|1320.80|1320.80|221.65| +2451889|42440|2451932|13645|90495|1378316|3290|31464|32170|1831235|5680|41854|29|8|12|2|169|35|4|41.77|125.31|71.42|215.56|285.68|167.08|501.24|25.71|0.00|10.00|285.68|311.39|295.68|321.39|118.60| +2451889|42440|2452008|12920|90495|1378316|3290|31464|32170|1831235|5680|41854|35|25|18|3|28|35|51|43.08|62.89|53.45|481.44|2725.95|2197.08|3207.39|81.77|0.00|256.53|2725.95|2807.72|2982.48|3064.25|528.87| +2451889|42440|2451943|8045|90495|1378316|3290|31464|32170|1831235|5680|41854|38|14|13|3|53|35|94|94.33|275.44|82.63|18124.14|7767.22|8867.02|25891.36|39.61|3805.93|5695.46|3961.29|4000.90|9656.75|9696.36|-4905.73| +2451889|42440|2451933|2234|90495|1378316|3290|31464|32170|1831235|5680|41854|31|11|13|3|110|35|38|84.90|135.84|71.99|2426.30|2735.62|3226.20|5161.92|27.35|0.00|980.40|2735.62|2762.97|3716.02|3743.37|-490.58| +2451973|61232|2452037|3687|32782|1743944|5506|19115|59321|467323|7048|23010|49|9|8|1|141|36|83|86.21|154.31|58.63|7941.44|4866.29|7155.43|12807.73|145.98|0.00|2689.20|4866.29|5012.27|7555.49|7701.47|-2289.14| +2451973|61232|2452089|791|32782|1743944|5506|19115|59321|467323|7048|23010|19|1|2|3|244|36|76|92.79|172.58|31.06|10755.52|2360.56|7052.04|13116.08|165.23|0.00|130.72|2360.56|2525.79|2491.28|2656.51|-4691.48| +2451973|61232|2452014|731|32782|1743944|5506|19115|59321|467323|7048|23010|33|13|7|5|300|36|33|72.55|161.06|80.53|2657.49|2657.49|2394.15|5314.98|53.14|0.00|1700.49|2657.49|2710.63|4357.98|4411.12|263.34| +2451973|61232|2452008|8293|32782|1743944|5506|19115|59321|467323|7048|23010|33|29|16|4|235|36|17|2.31|5.12|1.02|69.70|17.34|39.27|87.04|1.56|0.00|35.53|17.34|18.90|52.87|54.43|-21.93| +2451973|61232|2452083|2707|32782|1743944|5506|19115|59321|467323|7048|23010|59|25|6|4|52|36|77|13.47|15.35|5.06|792.33|389.62|1037.19|1181.95|19.48|0.00|0.00|389.62|409.10|389.62|409.10|-647.57| +2451973|61232|2452060|4817|32782|1743944|5506|19115|59321|467323|7048|23010|59|7|8|2|136|36|62|88.75|154.42|146.69|479.26|9094.78|5502.50|9574.04|90.94|0.00|1722.98|9094.78|9185.72|10817.76|10908.70|3592.28| +2451973|61232|2452060|1287|32782|1743944|5506|19115|59321|467323|7048|23010|41|25|12|4|266|36|8|3.01|3.79|2.84|7.60|22.72|24.08|30.32|0.07|21.81|5.12|0.91|0.98|6.03|6.10|-23.17| +2451973|61232|2452032|14255|32782|1743944|5506|19115|59321|467323|7048|23010|41|21|4|4|27|36|53|35.38|68.99|36.56|1718.79|1937.68|1875.14|3656.47|135.63|0.00|1279.42|1937.68|2073.31|3217.10|3352.73|62.54| 
+2451973|61232|2452058|17607|32782|1743944|5506|19115|59321|467323|7048|23010|17|11|10|2|34|36|75|40.94|81.47|3.25|5866.50|243.75|3070.50|6110.25|0.00|0.00|1710.75|243.75|243.75|1954.50|1954.50|-2826.75| +2451973|61232|2452092|2657|32782|1743944|5506|19115|59321|467323|7048|23010|59|1|19|5|41|36|100|28.71|81.82|5.72|7610.00|572.00|2871.00|8182.00|5.37|34.32|572.00|537.68|543.05|1109.68|1115.05|-2333.32| +2451973|61232|2451979|12139|32782|1743944|5506|19115|59321|467323|7048|23010|3|23|12|2|42|36|53|71.93|98.54|47.29|2716.25|2506.37|3812.29|5222.62|0.00|0.00|1252.92|2506.37|2506.37|3759.29|3759.29|-1305.92| +2451973|61232|2451984|12193|32782|1743944|5506|19115|59321|467323|7048|23010|29|17|16|3|120|36|65|17.34|28.09|16.01|785.20|1040.65|1127.10|1825.85|31.21|0.00|54.60|1040.65|1071.86|1095.25|1126.46|-86.45| +2451973|61232|2452038|8799|32782|1743944|5506|19115|59321|467323|7048|23010|19|25|3|1|209|36|55|27.25|77.39|17.02|3320.35|936.10|1498.75|4256.45|65.52|0.00|978.45|936.10|1001.62|1914.55|1980.07|-562.65| +2451973|61232|2452010|13993|32782|1743944|5506|19115|59321|467323|7048|23010|15|29|9|1|283|36|95|86.47|225.68|173.77|4931.45|16508.15|8214.65|21439.60|660.32|0.00|1286.30|16508.15|17168.47|17794.45|18454.77|8293.50| +2451540|67832|2451558|16492|9872|1545781|4443|21952|9872|1545781|4443|21952|8|10|4|5|16|37|61|81.13|180.10|151.28|1758.02|9228.08|4948.93|10986.10|184.56|0.00|1098.61|9228.08|9412.64|10326.69|10511.25|4279.15| +2451540|67832|2451621|7573|9872|1545781|4443|21952|9872|1545781|4443|21952|4|1|9|5|198|37|100|9.51|25.86|25.08|78.00|2508.00|951.00|2586.00|50.16|0.00|206.00|2508.00|2558.16|2714.00|2764.16|1557.00| +2451540|67832|2451606|12754|9872|1545781|4443|21952|9872|1545781|4443|21952|13|2|7|4|234|37|97|8.74|17.65|15.17|240.56|1471.49|847.78|1712.05|0.00|0.00|342.41|1471.49|1471.49|1813.90|1813.90|623.71| +2451540|67832|2451574|160|9872|1545781|4443|21952|9872|1545781|4443|21952|7|7|14|4|274|37|67|7.84|11.36|3.18|548.06|213.06|525.28|761.12|10.65|0.00|288.77|213.06|223.71|501.83|512.48|-312.22| +2451540|67832|2451622|8320|9872|1545781|4443|21952|9872|1545781|4443|21952|34|7|20|4|105|37|51|64.03|125.49|27.60|4992.39|1407.60|3265.53|6399.99|126.68|0.00|959.82|1407.60|1534.28|2367.42|2494.10|-1857.93| +2451540|67832|2451656|3481|9872|1545781|4443|21952|9872|1545781|4443|21952|20|7|10|3|29|37|23|45.25|132.58|62.31|1616.21|1433.13|1040.75|3049.34|57.32|0.00|213.44|1433.13|1490.45|1646.57|1703.89|392.38| +2451540|67832|2451609|913|9872|1545781|4443|21952|9872|1545781|4443|21952|22|25|14|5|286|37|65|64.72|152.09|13.68|8996.65|889.20|4206.80|9885.85|17.78|0.00|4745.00|889.20|906.98|5634.20|5651.98|-3317.60| +2451540|67832|2451579|6655|9872|1545781|4443|21952|9872|1545781|4443|21952|20|7|4|2|87|37|28|39.63|53.10|40.35|357.00|1129.80|1109.64|1486.80|45.19|0.00|416.08|1129.80|1174.99|1545.88|1591.07|20.16| +2451540|67832|2451638|16921|9872|1545781|4443|21952|9872|1545781|4443|21952|43|19|15|3|281|37|26|82.12|167.52|155.79|304.98|4050.54|2135.12|4355.52|283.53|0.00|1175.98|4050.54|4334.07|5226.52|5510.05|1915.42| +2451540|67832|2451600|4333|9872|1545781|4443|21952|9872|1545781|4443|21952|7|10|18|5|72|37|69|22.87|45.74|5.03|2808.99|347.07|1578.03|3156.06|27.76|0.00|251.85|347.07|374.83|598.92|626.68|-1230.96| +2451540|67832|2451622|1357|9872|1545781|4443|21952|9872|1545781|4443|21952|46|16|7|2|227|37|5|23.05|50.47|45.42|25.25|227.10|115.25|252.35|6.81|0.00|111.00|227.10|233.91|338.10|344.91|111.85| 
+2451540|67832|2451558|572|9872|1545781|4443|21952|9872|1545781|4443|21952|13|20|5|2|196|37|73|84.68|116.85|85.30|2303.15|6226.90|6181.64|8530.05|373.61|0.00|3752.93|6226.90|6600.51|9979.83|10353.44|45.26| +2451540|67832|2451651|13006|9872|1545781|4443|21952|9872|1545781|4443|21952|19|26|11|5|19|37|80|66.87|106.32|42.52|5104.00|3401.60|5349.60|8505.60|102.04|0.00|509.60|3401.60|3503.64|3911.20|4013.24|-1948.00| +2451540|67832|2451657|15260|9872|1545781|4443|21952|9872|1545781|4443|21952|44|10|19|2|128|37|62|61.83|127.36|81.51|2842.70|5053.62|3833.46|7896.32|0.00|0.00|3316.38|5053.62|5053.62|8370.00|8370.00|1220.16| +2451540|67832|2451647|17144|9872|1545781|4443|21952|9872|1545781|4443|21952|40|2|13|4|245|37|11|17.89|48.12|17.32|338.80|190.52|196.79|529.32|1.90|0.00|79.31|190.52|192.42|269.83|271.73|-6.27| +2451540|67832|2451586|1510|9872|1545781|4443|21952|9872|1545781|4443|21952|43|10|20|4|41|37|25|13.02|23.69|10.89|320.00|272.25|325.50|592.25|8.16|0.00|100.50|272.25|280.41|372.75|380.91|-53.25| +2451706|58174|2451794|3050|87550|500431|2112|46593|56646|1344568|36|41670|19|8|20|3|133|38|80|53.70|56.92|41.55|1229.60|3324.00|4296.00|4553.60|265.92|0.00|136.00|3324.00|3589.92|3460.00|3725.92|-972.00| +2451706|58174|2451816|5960|87550|500431|2112|46593|56646|1344568|36|41670|29|14|8|1|91|38|77|81.68|120.88|84.61|2792.79|6514.97|6289.36|9307.76|195.44|0.00|1023.33|6514.97|6710.41|7538.30|7733.74|225.61| +2451706|58174|2451771|13355|87550|500431|2112|46593|56646|1344568|36|41670|32|5|2|3|254|38|65|57.53|65.58|34.75|2003.95|2258.75|3739.45|4262.70|0.00|0.00|681.85|2258.75|2258.75|2940.60|2940.60|-1480.70| +2451706|58174|2451773|4837|87550|500431|2112|46593|56646|1344568|36|41670|25|29|17|4|231|38|81|27.99|53.18|13.82|3188.16|1119.42|2267.19|4307.58|37.27|705.23|732.24|414.19|451.46|1146.43|1183.70|-1853.00| +2451706|58174|2451823|2807|87550|500431|2112|46593|56646|1344568|36|41670|50|7|5|3|231|38|60|95.90|226.32|83.73|8555.40|5023.80|5754.00|13579.20|43.20|2863.56|0.00|2160.24|2203.44|2160.24|2203.44|-3593.76| +2451706|58174|2451760|668|87550|500431|2112|46593|56646|1344568|36|41670|53|5|16|4|222|38|90|40.03|70.45|19.72|4565.70|1774.80|3602.70|6340.50|124.23|0.00|2536.20|1774.80|1899.03|4311.00|4435.23|-1827.90| +2451706|58174|2451725|8147|87550|500431|2112|46593|56646|1344568|36|41670|14|14|15|2|202|38|63|23.04|66.35|45.78|1295.91|2884.14|1451.52|4180.05|93.44|547.98|501.48|2336.16|2429.60|2837.64|2931.08|884.64| +2451706|58174|2451803|8467|87550|500431|2112|46593|56646|1344568|36|41670|13|19|13|1|199|38|61|1.68|2.03|1.38|39.65|84.18|102.48|123.83|0.00|0.00|6.10|84.18|84.18|90.28|90.28|-18.30| +2451706|58174|2451811|15536|87550|500431|2112|46593|56646|1344568|36|41670|43|7|3|5|17|38|20|84.56|235.92|75.49|3208.60|1509.80|1691.20|4718.40|30.19|0.00|2170.40|1509.80|1539.99|3680.20|3710.39|-181.40| +2451771|7639|2451841|8414|59990|963621|852|10083|35808|780901|1776|10207|35|7|10|3|59|39|55|24.63|53.20|17.55|1960.75|965.25|1354.65|2926.00|38.61|0.00|409.20|965.25|1003.86|1374.45|1413.06|-389.40| +2451771|7639|2451845|12727|59990|963621|852|10083|35808|780901|1776|10207|1|29|12|4|6|39|51|14.23|34.00|15.98|919.02|814.98|725.73|1734.00|57.04|0.00|797.64|814.98|872.02|1612.62|1669.66|89.25| +2451771|7639|2451812|2453|59990|963621|852|10083|35808|780901|1776|10207|59|7|1|5|279|39|79|81.48|190.66|173.50|1355.64|13706.50|6436.92|15062.14|137.06|0.00|6325.53|13706.50|13843.56|20032.03|20169.09|7269.58| 
+2451771|7639|2451780|11585|59990|963621|852|10083|35808|780901|1776|10207|5|25|3|3|43|39|15|2.97|7.90|1.65|93.75|24.75|44.55|118.50|1.23|0.00|31.95|24.75|25.98|56.70|57.93|-19.80| +2451771|7639|2451833|15116|59990|963621|852|10083|35808|780901|1776|10207|53|29|16|4|92|39|54|42.66|102.81|45.23|3109.32|2442.42|2303.64|5551.74|122.12|0.00|1498.50|2442.42|2564.54|3940.92|4063.04|138.78| +2451771|7639|2451786|7105|59990|963621|852|10083|35808|780901|1776|10207|17|25|17|3|32|39|37|54.94|154.38|1.54|5655.08|56.98|2032.78|5712.06|0.56|0.00|1770.45|56.98|57.54|1827.43|1827.99|-1975.80| +2451771|7639|2451823|3059|59990|963621|852|10083|35808|780901|1776|10207|49|8|5|5|65|39|88|69.90|118.83|39.21|7006.56|3450.48|6151.20|10457.04|138.01|0.00|418.00|3450.48|3588.49|3868.48|4006.49|-2700.72| +2451771|7639|2451861|13933|59990|963621|852|10083|35808|780901|1776|10207|11|26|12|5|8|39|77|62.72|149.27|16.41|10230.22|1263.57|4829.44|11493.79|15.03|1048.76|2643.41|214.81|229.84|2858.22|2873.25|-4614.63| +2451771|7639|2451824|13271|59990|963621|852|10083|35808|780901|1776|10207|31|19|14|3|112|39|34|52.75|132.40|54.28|2656.08|1845.52|1793.50|4501.60|110.73|0.00|1935.62|1845.52|1956.25|3781.14|3891.87|52.02| +2451771|7639|2451825|3371|59990|963621|852|10083|35808|780901|1776|10207|14|11|7|3|249|39|57|89.30|173.24|51.97|6912.39|2962.29|5090.10|9874.68|177.73|0.00|592.23|2962.29|3140.02|3554.52|3732.25|-2127.81| +2451771|7639|2451878|9725|59990|963621|852|10083|35808|780901|1776|10207|13|29|10|3|46|39|35|86.33|219.27|114.02|3683.75|3990.70|3021.55|7674.45|39.90|0.00|2378.95|3990.70|4030.60|6369.65|6409.55|969.15| +2451771|7639|2451834|745|59990|963621|852|10083|35808|780901|1776|10207|20|23|3|1|262|39|44|77.85|159.59|135.65|1053.36|5968.60|3425.40|7021.96|84.75|1730.89|3300.00|4237.71|4322.46|7537.71|7622.46|812.31| +2451771|7639|2451788|31|59990|963621|852|10083|35808|780901|1776|10207|2|1|3|2|194|39|100|21.26|35.50|0.35|3515.00|35.00|2126.00|3550.00|2.45|0.00|497.00|35.00|37.45|532.00|534.45|-2091.00| +2451771|7639|2451852|17555|59990|963621|852|10083|35808|780901|1776|10207|47|7|14|2|117|39|15|87.02|127.91|0.00|1918.65|0.00|1305.30|1918.65|0.00|0.00|0.00|0.00|0.00|0.00|0.00|-1305.30| +2451771|7639|2451845|2315|59990|963621|852|10083|35808|780901|1776|10207|56|19|4|5|269|39|17|42.71|70.04|49.72|345.44|845.24|726.07|1190.68|67.61|0.00|35.70|845.24|912.85|880.94|948.55|119.17| +2451771|7639|2451821|10237|59990|963621|852|10083|35808|780901|1776|10207|20|29|2|4|13|39|90|59.60|107.28|80.46|2413.80|7241.40|5364.00|9655.20|651.72|0.00|2413.80|7241.40|7893.12|9655.20|10306.92|1877.40| +2452232|81936|2452241|14203|16812|1614684|1803|33675|8730|781278|1291|27963|53|3|4|4|294|40|92|24.49|57.30|27.50|2741.60|2530.00|2253.08|5271.60|75.90|0.00|579.60|2530.00|2605.90|3109.60|3185.50|276.92| +2452232|81936|2452302|7355|16812|1614684|1803|33675|8730|781278|1291|27963|39|29|10|5|90|40|15|42.87|100.31|62.19|571.80|932.85|643.05|1504.65|0.93|914.19|315.90|18.66|19.59|334.56|335.49|-624.39| +2452232|81936|2452308|14733|16812|1614684|1803|33675|8730|781278|1291|27963|53|21|12|1|300|40|25|74.12|146.75|127.67|477.00|3191.75|1853.00|3668.75|127.67|0.00|807.00|3191.75|3319.42|3998.75|4126.42|1338.75| +2452232|81936|2452284|1275|16812|1614684|1803|33675|8730|781278|1291|27963|17|13|15|3|31|40|80|57.49|166.14|142.88|1860.80|11430.40|4599.20|13291.20|800.12|0.00|4784.80|11430.40|12230.52|16215.20|17015.32|6831.20| 
+2452232|81936|2452318|12561|16812|1614684|1803|33675|8730|781278|1291|27963|33|9|3|3|246|40|59|32.09|74.12|9.63|3804.91|568.17|1893.31|4373.08|45.45|0.00|961.70|568.17|613.62|1529.87|1575.32|-1325.14| +2452232|81936|2452252|3205|16812|1614684|1803|33675|8730|781278|1291|27963|1|5|5|4|76|40|87|97.37|127.55|42.09|7435.02|3661.83|8471.19|11096.85|183.09|0.00|3661.83|3661.83|3844.92|7323.66|7506.75|-4809.36| +2452232|81936|2452327|5783|16812|1614684|1803|33675|8730|781278|1291|27963|25|5|6|1|49|40|63|75.34|89.65|31.37|3671.64|1976.31|4746.42|5647.95|158.10|0.00|1072.89|1976.31|2134.41|3049.20|3207.30|-2770.11| +2452232|81936|2452346|2551|16812|1614684|1803|33675|8730|781278|1291|27963|43|9|1|4|210|40|24|52.53|123.44|104.92|444.48|2518.08|1260.72|2962.56|176.26|0.00|888.72|2518.08|2694.34|3406.80|3583.06|1257.36| +2452232|81936|2452307|105|16812|1614684|1803|33675|8730|781278|1291|27963|15|21|4|2|110|40|21|5.29|7.67|1.68|125.79|35.28|111.09|161.07|0.00|0.00|69.09|35.28|35.28|104.37|104.37|-75.81| +2452232|81936|2452294|14913|16812|1614684|1803|33675|8730|781278|1291|27963|13|1|12|1|297|40|78|25.29|55.13|20.94|2666.82|1633.32|1972.62|4300.14|6.86|1290.32|859.56|343.00|349.86|1202.56|1209.42|-1629.62| +2452232|81936|2452287|12283|16812|1614684|1803|33675|8730|781278|1291|27963|45|23|14|3|295|40|43|85.42|122.15|98.94|998.03|4254.42|3673.06|5252.45|170.17|0.00|1050.49|4254.42|4424.59|5304.91|5475.08|581.36| +2452232|81936|2452341|457|16812|1614684|1803|33675|8730|781278|1291|27963|33|21|12|1|217|40|25|62.96|160.54|77.05|2087.25|1926.25|1574.00|4013.50|0.00|0.00|1284.25|1926.25|1926.25|3210.50|3210.50|352.25| +2452232|81936|2452236|14863|16812|1614684|1803|33675|8730|781278|1291|27963|27|17|4|4|174|40|74|99.04|251.56|218.85|2420.54|16194.90|7328.96|18615.44|0.00|0.00|2792.02|16194.90|16194.90|18986.92|18986.92|8865.94| +2452232|81936|2452281|16639|16812|1614684|1803|33675|8730|781278|1291|27963|43|3|2|2|295|40|18|13.05|15.52|11.48|72.72|206.64|234.90|279.36|2.04|2.06|66.96|204.58|206.62|271.54|273.58|-30.32| +2452232|81936|2452310|17059|16812|1614684|1803|33675|8730|781278|1291|27963|11|27|2|2|285|40|45|29.82|36.38|20.00|737.10|900.00|1341.90|1637.10|35.28|459.00|229.05|441.00|476.28|670.05|705.33|-900.90| +2452232|81936|2452339|10589|16812|1614684|1803|33675|8730|781278|1291|27963|53|7|11|3|145|40|76|30.31|60.01|40.20|1505.56|3055.20|2303.56|4560.76|91.65|0.00|1003.20|3055.20|3146.85|4058.40|4150.05|751.64| +2451695|44895|2451749|7655|77062|920710|4805|35253|84180|554438|1797|34645|8|7|10|4|218|41|5|8.40|19.57|3.32|81.25|16.60|42.00|97.85|0.49|0.00|28.35|16.60|17.09|44.95|45.44|-25.40| +2451695|44895|2451780|13928|77062|920710|4805|35253|84180|554438|1797|34645|11|26|9|2|165|41|83|88.07|214.89|79.50|11237.37|6598.50|7309.81|17835.87|461.89|0.00|3744.96|6598.50|7060.39|10343.46|10805.35|-711.31| +2451695|44895|2451804|4763|77062|920710|4805|35253|84180|554438|1797|34645|37|25|17|1|295|41|40|21.24|28.67|21.21|298.40|848.40|849.60|1146.80|25.45|0.00|217.60|848.40|873.85|1066.00|1091.45|-1.20| +2451695|44895|2451735|2305|77062|920710|4805|35253|84180|554438|1797|34645|31|14|7|1|35|41|15|51.21|139.29|30.64|1629.75|459.60|768.15|2089.35|27.57|0.00|626.70|459.60|487.17|1086.30|1113.87|-308.55| +2451695|44895|2451745|16801|77062|920710|4805|35253|84180|554438|1797|34645|37|5|17|4|248|41|19|13.01|16.00|1.92|267.52|36.48|247.19|304.00|0.00|35.75|152.00|0.73|0.73|152.73|152.73|-246.46| 
+2451695|44895|2451705|8315|77062|920710|4805|35253|84180|554438|1797|34645|50|14|20|5|97|41|62|12.02|16.58|8.29|513.98|513.98|745.24|1027.96|35.97|0.00|431.52|513.98|549.95|945.50|981.47|-231.26| +2451695|44895|2451735|6833|77062|920710|4805|35253|84180|554438|1797|34645|56|11|3|3|80|41|15|50.82|95.03|68.42|399.15|1026.30|762.30|1425.45|41.05|0.00|399.00|1026.30|1067.35|1425.30|1466.35|264.00| +2451695|44895|2451751|17527|77062|920710|4805|35253|84180|554438|1797|34645|56|14|14|5|266|41|25|53.52|80.28|62.61|441.75|1565.25|1338.00|2007.00|46.95|0.00|441.50|1565.25|1612.20|2006.75|2053.70|227.25| +2451695|44895|2451768|9673|77062|920710|4805|35253|84180|554438|1797|34645|29|23|2|4|236|41|95|6.85|9.59|1.05|811.30|99.75|650.75|911.05|1.99|0.00|218.50|99.75|101.74|318.25|320.24|-551.00| +2451695|44895|2451755|15757|77062|920710|4805|35253|84180|554438|1797|34645|2|25|1|2|270|41|28|14.49|22.60|9.26|373.52|259.28|405.72|632.80|20.74|0.00|107.52|259.28|280.02|366.80|387.54|-146.44| +2451695|44895|2451792|16676|77062|920710|4805|35253|84180|554438|1797|34645|53|14|9|5|34|41|36|59.88|78.44|47.84|1101.60|1722.24|2155.68|2823.84|86.11|0.00|847.08|1722.24|1808.35|2569.32|2655.43|-433.44| +2451695|44895|2451748|3134|77062|920710|4805|35253|84180|554438|1797|34645|11|8|14|3|242|41|81|53.84|129.21|10.33|9629.28|836.73|4361.04|10466.01|33.46|0.00|3453.03|836.73|870.19|4289.76|4323.22|-3524.31| +2451695|44895|2451756|16637|77062|920710|4805|35253|84180|554438|1797|34645|53|29|20|5|150|41|59|37.38|43.73|41.98|103.25|2476.82|2205.42|2580.07|99.07|0.00|489.70|2476.82|2575.89|2966.52|3065.59|271.40| +2451695|44895|2451729|3865|77062|920710|4805|35253|84180|554438|1797|34645|37|17|12|2|265|41|28|90.88|238.10|40.47|5533.64|1133.16|2544.64|6666.80|79.32|0.00|2800.00|1133.16|1212.48|3933.16|4012.48|-1411.48| +2451695|44895|2451760|3164|77062|920710|4805|35253|84180|554438|1797|34645|11|19|14|5|179|41|35|9.44|23.50|13.86|337.40|485.10|330.40|822.50|19.40|0.00|8.05|485.10|504.50|493.15|512.55|154.70| +2451131|72439|2451172|5929|65471|863021|1371|10921|59379|860833|2324|37874|7|4|19|5|231|42|17|25.72|46.81|41.19|95.54|700.23|437.24|795.77|1.68|616.20|119.34|84.03|85.71|203.37|205.05|-353.21| +2451131|72439|2451207|8590|65471|863021|1371|10921|59379|860833|2324|37874|49|19|8|3|108|42|7|79.50|87.45|26.23|428.54|183.61|556.50|612.15|3.08|132.19|110.18|51.42|54.50|161.60|164.68|-505.08| +2451131|72439|2451251|3091|65471|863021|1371|10921|59379|860833|2324|37874|40|1|4|2|107|42|98|1.12|1.93|0.77|113.68|75.46|109.76|189.14|3.01|0.00|63.70|75.46|78.47|139.16|142.17|-34.30| +2451131|72439|2451200|12508|65471|863021|1371|10921|59379|860833|2324|37874|16|4|17|1|45|42|52|41.04|92.34|30.47|3217.24|1584.44|2134.08|4801.68|126.75|0.00|671.84|1584.44|1711.19|2256.28|2383.03|-549.64| +2451131|72439|2451201|16081|65471|863021|1371|10921|59379|860833|2324|37874|25|14|16|1|46|42|14|73.48|117.56|43.49|1036.98|608.86|1028.72|1645.84|3.16|529.70|674.66|79.16|82.32|753.82|756.98|-949.56| +2451131|72439|2451219|14503|65471|863021|1371|10921|59379|860833|2324|37874|25|20|19|4|102|42|76|53.02|127.24|0.00|9670.24|0.00|4029.52|9670.24|0.00|0.00|3093.96|0.00|0.00|3093.96|3093.96|-4029.52| +2451131|72439|2451215|5014|65471|863021|1371|10921|59379|860833|2324|37874|44|1|19|2|18|42|8|43.48|86.96|26.08|487.04|208.64|347.84|695.68|10.43|0.00|278.24|208.64|219.07|486.88|497.31|-139.20| 
+2451131|72439|2451152|3364|65471|863021|1371|10921|59379|860833|2324|37874|16|10|16|3|223|42|71|94.86|175.49|59.66|8223.93|4235.86|6735.06|12459.79|84.71|0.00|6105.29|4235.86|4320.57|10341.15|10425.86|-2499.20| +2451939|48209|2451994|11871|92793|1723260|6174|24241|88257|297951|136|38603|27|23|3|1|269|43|14|46.91|85.84|16.30|973.56|228.20|656.74|1201.76|0.18|209.94|132.16|18.26|18.44|150.42|150.60|-638.48| +2451939|48209|2451966|9141|92793|1723260|6174|24241|88257|297951|136|38603|1|5|5|2|219|43|19|52.65|61.60|14.78|889.58|280.82|1000.35|1170.40|25.27|0.00|315.97|280.82|306.09|596.79|622.06|-719.53| +2451939|48209|2452057|12785|92793|1723260|6174|24241|88257|297951|136|38603|49|11|17|3|219|43|8|6.43|9.51|5.42|32.72|43.36|51.44|76.08|3.90|0.00|4.56|43.36|47.26|47.92|51.82|-8.08| +2451939|48209|2451979|9201|92793|1723260|6174|24241|88257|297951|136|38603|49|27|10|4|145|43|51|43.09|50.41|40.83|488.58|2082.33|2197.59|2570.91|145.76|0.00|1208.19|2082.33|2228.09|3290.52|3436.28|-115.26| +2451939|48209|2451954|551|92793|1723260|6174|24241|88257|297951|136|38603|7|3|10|3|79|43|100|86.90|114.70|73.40|4130.00|7340.00|8690.00|11470.00|42.57|5211.40|4014.00|2128.60|2171.17|6142.60|6185.17|-6561.40| +2451939|48209|2451950|349|92793|1723260|6174|24241|88257|297951|136|38603|39|19|7|4|204|43|30|56.32|78.84|19.71|1773.90|591.30|1689.60|2365.20|41.39|0.00|236.40|591.30|632.69|827.70|869.09|-1098.30| +2451939|48209|2452055|17201|92793|1723260|6174|24241|88257|297951|136|38603|5|11|19|2|165|43|61|62.21|179.16|46.58|8087.38|2841.38|3794.81|10928.76|28.41|0.00|5027.01|2841.38|2869.79|7868.39|7896.80|-953.43| +2451939|48209|2452009|385|92793|1723260|6174|24241|88257|297951|136|38603|15|15|18|5|225|43|74|57.16|106.31|15.94|6687.38|1179.56|4229.84|7866.94|1.76|1144.17|314.50|35.39|37.15|349.89|351.65|-4194.45| +2451134|80873|2451161|17071|97122|1431191|2060|19329|57166|125023|2336|15605|31|7|19|2|222|44|16|69.46|163.23|19.58|2298.40|313.28|1111.36|2611.68|25.06|0.00|26.08|313.28|338.34|339.36|364.42|-798.08| +2451134|80873|2451173|5578|97122|1431191|2060|19329|57166|125023|2336|15605|1|28|19|1|200|44|60|90.06|230.55|209.80|1245.00|12588.00|5403.60|13833.00|251.76|0.00|1936.20|12588.00|12839.76|14524.20|14775.96|7184.40| +2451134|80873|2451193|973|97122|1431191|2060|19329|57166|125023|2336|15605|19|16|8|2|103|44|53|65.28|129.90|120.80|482.30|6402.40|3459.84|6884.70|128.04|0.00|1583.11|6402.40|6530.44|7985.51|8113.55|2942.56| +2451134|80873|2451190|3820|97122|1431191|2060|19329|57166|125023|2336|15605|43|26|19|1|193|44|10|80.30|165.41|107.51|579.00|1075.10|803.00|1654.10|83.21|150.51|827.00|924.59|1007.80|1751.59|1834.80|121.59| +2451134|80873|2451229|3223|97122|1431191|2060|19329|57166|125023|2336|15605|19|20|12|1|127|44|27|83.59|242.41|116.35|3403.62|3141.45|2256.93|6545.07|94.24|0.00|1832.49|3141.45|3235.69|4973.94|5068.18|884.52| +2451134|80873|2451217|14728|97122|1431191|2060|19329|57166|125023|2336|15605|16|28|1|5|266|44|23|50.49|122.18|21.99|2304.37|505.77|1161.27|2810.14|19.82|257.94|1095.95|247.83|267.65|1343.78|1363.60|-913.44| +2451134|80873|2451222|3889|97122|1431191|2060|19329|57166|125023|2336|15605|2|13|2|2|239|44|52|96.16|190.39|91.38|5148.52|4751.76|5000.32|9900.28|237.58|0.00|1187.68|4751.76|4989.34|5939.44|6177.02|-248.56| +2451134|80873|2451223|13294|97122|1431191|2060|19329|57166|125023|2336|15605|52|13|20|3|233|44|28|47.87|102.92|26.75|2132.76|749.00|1340.36|2881.76|5.24|644.14|1412.04|104.86|110.10|1516.90|1522.14|-1235.50| 
+2451134|80873|2451211|139|97122|1431191|2060|19329|57166|125023|2336|15605|8|8|8|1|21|44|36|20.77|39.04|3.51|1279.08|126.36|747.72|1405.44|10.10|0.00|351.36|126.36|136.46|477.72|487.82|-621.36| +2451150|39372|2451203|8912|39927|464905|4730|16108|60212|1659758|1889|40114|20|1|1|3|278|45|94|76.32|135.84|103.23|3065.34|9703.62|7174.08|12768.96|291.10|0.00|1786.94|9703.62|9994.72|11490.56|11781.66|2529.54| +2451150|39372|2451237|14362|39927|464905|4730|16108|60212|1659758|1889|40114|14|1|11|4|227|45|47|2.20|2.31|1.15|54.52|54.05|103.40|108.57|2.70|0.00|31.02|54.05|56.75|85.07|87.77|-49.35| +2451150|39372|2451249|1502|39927|464905|4730|16108|60212|1659758|1889|40114|38|19|20|5|167|45|25|86.71|234.98|129.23|2643.75|3230.75|2167.75|5874.50|32.30|0.00|117.25|3230.75|3263.05|3348.00|3380.30|1063.00| +2451150|39372|2451164|10166|39927|464905|4730|16108|60212|1659758|1889|40114|1|28|12|4|6|45|3|85.74|168.05|31.92|408.39|95.76|257.22|504.15|3.83|0.00|191.55|95.76|99.59|287.31|291.14|-161.46| +2451150|39372|2451233|7315|39927|464905|4730|16108|60212|1659758|1889|40114|13|25|16|2|158|45|33|50.34|61.91|9.28|1736.79|306.24|1661.22|2043.03|21.43|0.00|408.54|306.24|327.67|714.78|736.21|-1354.98| +2451150|39372|2451188|10448|39927|464905|4730|16108|60212|1659758|1889|40114|16|19|11|2|230|45|97|38.73|89.85|85.35|436.50|8278.95|3756.81|8715.45|82.78|0.00|2352.25|8278.95|8361.73|10631.20|10713.98|4522.14| +2451150|39372|2451167|2414|39927|464905|4730|16108|60212|1659758|1889|40114|7|26|4|4|14|45|26|53.43|110.06|3.30|2775.76|85.80|1389.18|2861.56|0.00|5.14|1258.92|80.66|80.66|1339.58|1339.58|-1308.52| +2451150|39372|2451261|11431|39927|464905|4730|16108|60212|1659758|1889|40114|58|16|11|5|149|45|76|60.25|160.26|80.13|6089.88|6089.88|4579.00|12179.76|16.44|4445.61|4750.00|1644.27|1660.71|6394.27|6410.71|-2934.73| +2452004|58928|2452009|515|81081|164538|2395|11052|47597|1141632|3070|1327|5|1|12|5|281|46|14|21.02|49.81|44.33|76.72|620.62|294.28|697.34|12.41|0.00|264.88|620.62|633.03|885.50|897.91|326.34| +2452004|58928|2452036|4843|81081|164538|2395|11052|47597|1141632|3070|1327|59|29|7|2|277|46|19|62.38|84.21|30.31|1024.10|575.89|1185.22|1599.99|46.07|0.00|783.94|575.89|621.96|1359.83|1405.90|-609.33| +2452004|58928|2452051|3385|81081|164538|2395|11052|47597|1141632|3070|1327|17|25|4|5|69|46|13|3.52|8.55|8.03|6.76|104.39|45.76|111.15|5.21|0.00|6.63|104.39|109.60|111.02|116.23|58.63| +2452004|58928|2452114|1441|81081|164538|2395|11052|47597|1141632|3070|1327|27|19|19|3|67|46|59|14.65|31.64|28.79|168.15|1698.61|864.35|1866.76|0.00|0.00|709.18|1698.61|1698.61|2407.79|2407.79|834.26| +2452004|58928|2452033|1791|81081|164538|2395|11052|47597|1141632|3070|1327|7|21|11|3|138|46|62|36.49|105.09|11.55|5799.48|716.10|2262.38|6515.58|50.12|0.00|65.10|716.10|766.22|781.20|831.32|-1546.28| +2452004|58928|2452035|11565|81081|164538|2395|11052|47597|1141632|3070|1327|33|7|18|2|16|46|46|42.48|72.64|60.29|568.10|2773.34|1954.08|3341.44|55.46|0.00|1302.72|2773.34|2828.80|4076.06|4131.52|819.26| +2452004|58928|2452081|11099|81081|164538|2395|11052|47597|1141632|3070|1327|7|9|3|4|45|46|90|61.80|130.39|19.55|9975.60|1759.50|5562.00|11735.10|158.35|0.00|585.90|1759.50|1917.85|2345.40|2503.75|-3802.50| +2452004|58928|2452094|12233|81081|164538|2395|11052|47597|1141632|3070|1327|57|19|1|5|246|46|89|54.96|146.74|96.84|4441.10|8618.76|4891.44|13059.86|444.72|1206.62|4309.38|7412.14|7856.86|11721.52|12166.24|2520.70| 
+2452004|58928|2452006|13719|81081|164538|2395|11052|47597|1141632|3070|1327|49|1|7|3|49|46|99|26.43|47.83|43.52|426.69|4308.48|2616.57|4735.17|86.16|0.00|141.57|4308.48|4394.64|4450.05|4536.21|1691.91| +2452004|58928|2452029|5447|81081|164538|2395|11052|47597|1141632|3070|1327|25|19|20|2|197|46|96|24.79|26.52|25.45|102.72|2443.20|2379.84|2545.92|48.86|0.00|75.84|2443.20|2492.06|2519.04|2567.90|63.36| +2452004|58928|2452083|9161|81081|164538|2395|11052|47597|1141632|3070|1327|43|13|9|1|163|46|69|76.67|86.63|65.83|1435.20|4542.27|5290.23|5977.47|234.38|635.91|2689.62|3906.36|4140.74|6595.98|6830.36|-1383.87| +2452004|58928|2452112|11823|81081|164538|2395|11052|47597|1141632|3070|1327|29|21|7|3|179|46|65|87.93|177.61|42.62|8774.35|2770.30|5715.45|11544.65|83.10|0.00|4271.15|2770.30|2853.40|7041.45|7124.55|-2945.15| +2452004|58928|2452110|15015|81081|164538|2395|11052|47597|1141632|3070|1327|55|13|3|2|258|46|47|33.51|45.23|28.49|786.78|1339.03|1574.97|2125.81|80.34|0.00|722.39|1339.03|1419.37|2061.42|2141.76|-235.94| +2452004|58928|2452020|6013|81081|164538|2395|11052|47597|1141632|3070|1327|53|3|4|4|54|46|23|4.36|8.19|3.19|115.00|73.37|100.28|188.37|2.20|0.00|41.40|73.37|75.57|114.77|116.97|-26.91| +2451533|33585|2451542|10142|76684|1465338|6439|14293|32665|1518423|6749|20203|7|13|3|1|32|47|91|98.12|195.25|117.15|7107.10|10660.65|8928.92|17767.75|95.94|1066.06|5329.87|9594.59|9690.53|14924.46|15020.40|665.67| +2451533|33585|2451572|15604|76684|1465338|6439|14293|32665|1518423|6749|20203|49|1|17|3|72|47|10|4.55|8.19|0.73|74.60|7.30|45.50|81.90|0.07|0.00|17.10|7.30|7.37|24.40|24.47|-38.20| +2451533|33585|2451553|1456|76684|1465338|6439|14293|32665|1518423|6749|20203|50|13|11|2|183|47|12|7.67|18.79|10.14|103.80|121.68|92.04|225.48|3.65|0.00|108.12|121.68|125.33|229.80|233.45|29.64| +2451533|33585|2451555|1297|76684|1465338|6439|14293|32665|1518423|6749|20203|55|2|13|4|215|47|87|43.05|121.83|65.78|4876.35|5722.86|3745.35|10599.21|400.60|0.00|635.10|5722.86|6123.46|6357.96|6758.56|1977.51| +2451533|33585|2451635|4952|76684|1465338|6439|14293|32665|1518423|6749|20203|40|20|18|5|235|47|38|22.24|64.94|41.56|888.44|1579.28|845.12|2467.72|78.96|0.00|1159.76|1579.28|1658.24|2739.04|2818.00|734.16| +2451533|33585|2451634|8497|76684|1465338|6439|14293|32665|1518423|6749|20203|13|14|19|5|41|47|58|3.32|6.04|5.97|4.06|346.26|192.56|350.32|11.63|55.40|41.76|290.86|302.49|332.62|344.25|98.30| +2451533|33585|2451621|44|76684|1465338|6439|14293|32665|1518423|6749|20203|26|2|10|3|49|47|24|4.61|8.75|0.61|195.36|14.64|110.64|210.00|1.31|0.00|69.12|14.64|15.95|83.76|85.07|-96.00| +2451533|33585|2451579|7444|76684|1465338|6439|14293|32665|1518423|6749|20203|58|1|15|4|163|47|4|90.91|113.63|31.81|327.28|127.24|363.64|454.52|8.90|0.00|168.16|127.24|136.14|295.40|304.30|-236.40| +2451533|33585|2451644|9674|76684|1465338|6439|14293|32665|1518423|6749|20203|19|8|10|3|193|47|67|2.83|6.19|0.24|398.65|16.08|189.61|414.73|0.48|0.00|57.62|16.08|16.56|73.70|74.18|-173.53| +2451439|19447|2451495|2968|83448|868542|4036|13577|76051|209298|4059|9200|8|20|7|1|172|48|40|55.26|147.54|116.55|1239.60|4662.00|2210.40|5901.60|419.58|0.00|530.80|4662.00|5081.58|5192.80|5612.38|2451.60| +2451439|19447|2451510|16682|83448|868542|4036|13577|76051|209298|4059|9200|52|2|14|1|8|48|61|86.80|165.78|48.07|7180.31|2932.27|5294.80|10112.58|80.93|909.00|3640.48|2023.27|2104.20|5663.75|5744.68|-3271.53| 
+2451439|19447|2451535|11158|83448|868542|4036|13577|76051|209298|4059|9200|43|19|18|4|27|48|55|96.71|278.52|272.94|306.90|15011.70|5319.05|15318.60|900.70|0.00|7199.50|15011.70|15912.40|22211.20|23111.90|9692.65| +2451439|19447|2451497|178|83448|868542|4036|13577|76051|209298|4059|9200|14|16|10|1|162|48|88|65.09|102.19|85.83|1439.68|7553.04|5727.92|8992.72|226.59|0.00|3686.32|7553.04|7779.63|11239.36|11465.95|1825.12| +2451439|19447|2451538|17092|83448|868542|4036|13577|76051|209298|4059|9200|2|2|12|2|242|48|80|3.23|6.33|2.34|319.20|187.20|258.40|506.40|0.26|174.09|222.40|13.11|13.37|235.51|235.77|-245.29| +2451439|19447|2451465|9268|83448|868542|4036|13577|76051|209298|4059|9200|38|25|19|5|151|48|25|60.97|63.40|24.09|982.75|602.25|1524.25|1585.00|48.18|0.00|63.25|602.25|650.43|665.50|713.68|-922.00| +2451439|19447|2451542|9910|83448|868542|4036|13577|76051|209298|4059|9200|52|4|9|5|171|48|20|84.82|199.32|41.85|3149.40|837.00|1696.40|3986.40|14.56|351.54|597.80|485.46|500.02|1083.26|1097.82|-1210.94| +2451439|19447|2451450|17834|83448|868542|4036|13577|76051|209298|4059|9200|7|25|5|4|273|48|100|25.69|45.72|2.74|4298.00|274.00|2569.00|4572.00|16.44|0.00|2011.00|274.00|290.44|2285.00|2301.44|-2295.00| +2451439|19447|2451490|17704|83448|868542|4036|13577|76051|209298|4059|9200|50|20|13|3|40|48|4|34.27|92.18|21.20|283.92|84.80|137.08|368.72|7.63|0.00|99.52|84.80|92.43|184.32|191.95|-52.28| +2451439|19447|2451485|16357|83448|868542|4036|13577|76051|209298|4059|9200|37|25|4|5|226|48|35|83.90|100.68|27.18|2572.50|951.30|2936.50|3523.80|57.07|0.00|1515.15|951.30|1008.37|2466.45|2523.52|-1985.20| +2451439|19447|2451528|7276|83448|868542|4036|13577|76051|209298|4059|9200|19|4|6|4|259|48|4|31.95|51.12|20.95|120.68|83.80|127.80|204.48|5.86|0.00|94.04|83.80|89.66|177.84|183.70|-44.00| +2451439|19447|2451507|17149|83448|868542|4036|13577|76051|209298|4059|9200|26|19|6|5|111|48|42|79.67|137.82|59.26|3299.52|2488.92|3346.14|5788.44|199.11|0.00|1273.44|2488.92|2688.03|3762.36|3961.47|-857.22| +2451439|19447|2451555|14000|83448|868542|4036|13577|76051|209298|4059|9200|52|19|13|3|26|48|49|21.45|58.12|27.31|1509.69|1338.19|1051.05|2847.88|0.00|0.00|113.68|1338.19|1338.19|1451.87|1451.87|287.14| +2451439|19447|2451466|17306|83448|868542|4036|13577|76051|209298|4059|9200|13|26|11|3|245|48|41|65.98|141.19|0.00|5788.79|0.00|2705.18|5788.79|0.00|0.00|578.51|0.00|0.00|578.51|578.51|-2705.18| +2451439|19447|2451554|4916|83448|868542|4036|13577|76051|209298|4059|9200|58|28|17|2|224|48|19|15.99|42.05|24.80|327.75|471.20|303.81|798.95|18.84|0.00|207.67|471.20|490.04|678.87|697.71|167.39| +2451439|19447|2451511|7819|83448|868542|4036|13577|76051|209298|4059|9200|22|16|3|5|97|48|29|24.69|48.88|5.37|1261.79|155.73|716.01|1417.52|0.00|0.00|269.12|155.73|155.73|424.85|424.85|-560.28| +2452291|9182|2452394|1281|74159|77626|3940|49054|71649|1811922|2702|9816|45|30|7|4|141|49|65|43.80|47.74|15.75|2079.35|1023.75|2847.00|3103.10|10.23|0.00|589.55|1023.75|1033.98|1613.30|1623.53|-1823.25| +2452291|9182|2452346|12033|74159|77626|3940|49054|71649|1811922|2702|9816|3|27|6|3|57|49|24|15.33|31.73|23.16|205.68|555.84|367.92|761.52|8.00|466.90|251.28|88.94|96.94|340.22|348.22|-278.98| +2452291|9182|2452392|10713|74159|77626|3940|49054|71649|1811922|2702|9816|18|1|16|3|83|49|40|71.16|183.59|38.55|5801.60|1542.00|2846.40|7343.60|92.52|0.00|440.40|1542.00|1634.52|1982.40|2074.92|-1304.40| 
+2452291|9182|2452381|1861|74159|77626|3940|49054|71649|1811922|2702|9816|27|27|11|1|51|49|56|42.41|51.31|49.77|86.24|2787.12|2374.96|2873.36|83.61|0.00|1149.12|2787.12|2870.73|3936.24|4019.85|412.16| +2452291|9182|2452389|17913|74159|77626|3940|49054|71649|1811922|2702|9816|21|30|19|3|87|49|32|68.14|196.24|49.06|4709.76|1569.92|2180.48|6279.68|0.00|0.00|2009.28|1569.92|1569.92|3579.20|3579.20|-610.56| +2452291|9182|2452318|17973|74159|77626|3940|49054|71649|1811922|2702|9816|9|7|6|3|257|49|14|77.88|144.07|36.01|1512.84|504.14|1090.32|2016.98|25.20|0.00|1008.42|504.14|529.34|1512.56|1537.76|-586.18| +2452291|9182|2452395|6399|74159|77626|3940|49054|71649|1811922|2702|9816|27|9|16|4|105|49|25|24.75|48.01|4.80|1080.25|120.00|618.75|1200.25|7.20|0.00|588.00|120.00|127.20|708.00|715.20|-498.75| +2452291|9182|2452350|13365|74159|77626|3940|49054|71649|1811922|2702|9816|27|1|8|3|45|49|81|65.92|166.11|134.54|2557.17|10897.74|5339.52|13454.91|762.84|0.00|5650.56|10897.74|11660.58|16548.30|17311.14|5558.22| +2452291|9182|2452382|9816|74159|77626|3940|49054|71649|1811922|2702|9816|1|1|11|1|181|49|22|96.03|189.17|3.78|4078.58|83.16|2112.66|4161.74|0.83|0.00|166.32|83.16|83.99|249.48|250.31|-2029.50| +2452291|9182|2452294|8883|74159|77626|3940|49054|71649|1811922|2702|9816|37|24|2|2|12|49|91|42.85|53.13|42.50|967.33|3867.50|3899.35|4834.83|142.32|2088.45|1111.11|1779.05|1921.37|2890.16|3032.48|-2120.30| +2452291|9182|2452340|6372|74159|77626|3940|49054|71649|1811922|2702|9816|9|21|20|1|112|49|20|97.01|186.25|80.08|2123.40|1601.60|1940.20|3725.00|144.14|0.00|782.20|1601.60|1745.74|2383.80|2527.94|-338.60| +2452291|9182|2452305|10083|74159|77626|3940|49054|71649|1811922|2702|9816|45|15|13|3|226|49|5|62.93|71.11|17.77|266.70|88.85|314.65|355.55|7.99|0.00|88.85|88.85|96.84|177.70|185.69|-225.80| +2452291|9182|2452329|7494|74159|77626|3940|49054|71649|1811922|2702|9816|6|12|19|5|158|49|20|37.84|55.24|5.52|994.40|110.40|756.80|1104.80|0.00|37.53|110.40|72.87|72.87|183.27|183.27|-683.93| +2452586|44689|2452649|1647|99181|380076|4171|7866|75251|339248|2151|19446|13|12|12|1|10|50|66|22.44|48.47|21.32|1791.90|1407.12|1481.04|3199.02|0.00|0.00|671.22|1407.12|1407.12|2078.34|2078.34|-73.92| +2452586|44689|2452666|4338|99181|380076|4171|7866|75251|339248|2151|19446|37|12|16|3|20|50|34|56.07|164.28|67.35|3295.62|2289.90|1906.38|5585.52|206.09|0.00|2457.52|2289.90|2495.99|4747.42|4953.51|383.52| +2452586|44689|2452698|11691|99181|380076|4171|7866|75251|339248|2151|19446|7|13|17|2|120|50|41|85.28|160.32|17.63|5850.29|722.83|3496.48|6573.12|28.91|0.00|3220.55|722.83|751.74|3943.38|3972.29|-2773.65| +2452586|44689|2452612|7927|99181|380076|4171|7866|75251|339248|2151|19446|27|25|2|3|17|50|20|75.71|80.25|52.96|545.80|1059.20|1514.20|1605.00|57.19|105.92|176.40|953.28|1010.47|1129.68|1186.87|-560.92| +2452586|44689|2452604|543|99181|380076|4171|7866|75251|339248|2151|19446|31|19|1|5|59|50|14|85.93|118.58|101.97|232.54|1427.58|1203.02|1660.12|114.20|0.00|464.80|1427.58|1541.78|1892.38|2006.58|224.56| +2452586|44689|2452645|6390|99181|380076|4171|7866|75251|339248|2151|19446|13|15|6|2|25|50|59|76.14|85.27|0.85|4980.78|50.15|4492.26|5030.93|2.00|0.00|1257.29|50.15|52.15|1307.44|1309.44|-4442.11| +2452586|44689|2452644|6151|99181|380076|4171|7866|75251|339248|2151|19446|42|3|20|3|176|50|28|65.64|183.13|139.17|1230.88|3896.76|1837.92|5127.64|77.93|0.00|717.64|3896.76|3974.69|4614.40|4692.33|2058.84| 
+2452586|44689|2452639|10747|99181|380076|4171|7866|75251|339248|2151|19446|48|1|18|2|89|50|18|11.78|15.07|8.28|122.22|149.04|212.04|271.26|0.00|0.00|78.66|149.04|149.04|227.70|227.70|-63.00| +2451876|17566|2451956|8201|31908|1837229|2550|26718|54961|1074489|5881|25249|5|2|1|3|82|51|88|51.83|51.83|5.70|4059.44|501.60|4561.04|4561.04|25.08|0.00|1550.56|501.60|526.68|2052.16|2077.24|-4059.44| +2451876|17566|2451957|2687|31908|1837229|2550|26718|54961|1074489|5881|25249|56|29|1|4|280|51|33|98.35|116.05|27.85|2910.60|919.05|3245.55|3829.65|0.00|0.00|1033.89|919.05|919.05|1952.94|1952.94|-2326.50| +2451876|17566|2451989|7226|31908|1837229|2550|26718|54961|1074489|5881|25249|19|26|8|2|26|51|47|90.57|120.45|110.81|453.08|5208.07|4256.79|5661.15|260.40|0.00|2037.92|5208.07|5468.47|7245.99|7506.39|951.28| +2451876|17566|2451906|4313|31908|1837229|2550|26718|54961|1074489|5881|25249|23|1|3|2|261|51|97|5.40|16.03|7.69|808.98|745.93|523.80|1554.91|29.83|0.00|481.12|745.93|775.76|1227.05|1256.88|222.13| +2451876|17566|2451891|8105|31908|1837229|2550|26718|54961|1074489|5881|25249|53|5|16|2|215|51|48|99.64|102.62|37.96|3103.68|1822.08|4782.72|4925.76|0.00|400.85|1132.80|1421.23|1421.23|2554.03|2554.03|-3361.49| +2451876|17566|2451926|11180|31908|1837229|2550|26718|54961|1074489|5881|25249|50|25|20|2|214|51|33|71.60|158.23|30.06|4229.61|991.98|2362.80|5221.59|59.51|0.00|1775.07|991.98|1051.49|2767.05|2826.56|-1370.82| +2451876|17566|2451897|13339|31908|1837229|2550|26718|54961|1074489|5881|25249|49|7|6|3|40|51|24|84.73|249.10|42.34|4962.24|1016.16|2033.52|5978.40|25.30|172.74|1972.80|843.42|868.72|2816.22|2841.52|-1190.10| +2451876|17566|2451888|10244|31908|1837229|2550|26718|54961|1074489|5881|25249|59|8|7|2|124|51|72|40.11|89.84|17.96|5175.36|1293.12|2887.92|6468.48|38.79|0.00|0.00|1293.12|1331.91|1293.12|1331.91|-1594.80| +2451876|17566|2451891|10115|31908|1837229|2550|26718|54961|1074489|5881|25249|26|7|7|2|57|51|51|27.21|67.75|0.00|3455.25|0.00|1387.71|3455.25|0.00|0.00|587.01|0.00|0.00|587.01|587.01|-1387.71| +2451876|17566|2451888|14384|31908|1837229|2550|26718|54961|1074489|5881|25249|25|14|13|5|60|51|85|81.44|190.56|190.56|0.00|16197.60|6922.40|16197.60|1457.78|0.00|5668.65|16197.60|17655.38|21866.25|23324.03|9275.20| +2451876|17566|2451927|8915|31908|1837229|2550|26718|54961|1074489|5881|25249|13|7|14|1|258|51|31|86.29|89.74|39.48|1558.06|1223.88|2674.99|2781.94|36.71|0.00|500.65|1223.88|1260.59|1724.53|1761.24|-1451.11| +2451876|17566|2451984|12025|31908|1837229|2550|26718|54961|1074489|5881|25249|50|11|3|3|140|51|38|86.64|160.28|36.86|4689.96|1400.68|3292.32|6090.64|56.02|0.00|121.60|1400.68|1456.70|1522.28|1578.30|-1891.64| +2451876|17566|2451931|3950|31908|1837229|2550|26718|54961|1074489|5881|25249|25|23|11|2|46|51|66|54.50|113.36|113.36|0.00|7481.76|3597.00|7481.76|523.72|0.00|523.38|7481.76|8005.48|8005.14|8528.86|3884.76| +2452301|48530|2452380|17928|80393|184753|1913|39826|71596|197177|3259|13619|9|3|2|4|226|52|7|27.24|35.41|24.07|79.38|168.49|190.68|247.87|0.00|121.31|4.90|47.18|47.18|52.08|52.08|-143.50| +2452301|48530|2452408|7254|80393|184753|1913|39826|71596|197177|3259|13619|42|15|5|2|269|52|31|61.74|98.16|84.41|426.25|2616.71|1913.94|3042.96|157.00|654.17|1308.20|1962.54|2119.54|3270.74|3427.74|48.60| +2452301|48530|2452346|4341|80393|184753|1913|39826|71596|197177|3259|13619|25|21|12|5|231|52|28|32.59|62.57|32.53|841.12|910.84|912.52|1751.96|61.20|145.73|122.36|765.11|826.31|887.47|948.67|-147.41| 
+2452301|48530|2452329|17148|80393|184753|1913|39826|71596|197177|3259|13619|27|9|6|2|68|52|43|16.40|21.64|0.86|893.54|36.98|705.20|930.52|2.95|0.00|288.10|36.98|39.93|325.08|328.03|-668.22| +2452301|48530|2452336|12138|80393|184753|1913|39826|71596|197177|3259|13619|7|18|14|4|47|52|98|66.56|133.12|50.58|8088.92|4956.84|6522.88|13045.76|99.13|0.00|6000.54|4956.84|5055.97|10957.38|11056.51|-1566.04| +2452301|48530|2452357|4842|80393|184753|1913|39826|71596|197177|3259|13619|27|15|15|2|126|52|86|11.22|25.02|21.26|323.36|1828.36|964.92|2151.72|91.41|0.00|408.50|1828.36|1919.77|2236.86|2328.27|863.44| +2452301|48530|2452363|16311|80393|184753|1913|39826|71596|197177|3259|13619|30|3|19|1|46|52|64|70.26|89.93|73.74|1036.16|4719.36|4496.64|5755.52|0.00|0.00|1093.12|4719.36|4719.36|5812.48|5812.48|222.72| +2452301|48530|2452406|2331|80393|184753|1913|39826|71596|197177|3259|13619|33|13|14|4|201|52|61|17.47|48.91|13.20|2178.31|805.20|1065.67|2983.51|32.20|0.00|298.29|805.20|837.40|1103.49|1135.69|-260.47| +2452301|48530|2452388|17394|80393|184753|1913|39826|71596|197177|3259|13619|31|21|19|4|79|52|76|48.57|96.16|69.23|2046.68|5261.48|3691.32|7308.16|315.68|0.00|145.92|5261.48|5577.16|5407.40|5723.08|1570.16| +2452301|48530|2452394|7717|80393|184753|1913|39826|71596|197177|3259|13619|30|24|6|5|257|52|42|12.06|20.14|16.31|160.86|685.02|506.52|845.88|13.70|0.00|329.70|685.02|698.72|1014.72|1028.42|178.50| +2452301|48530|2452420|3585|80393|184753|1913|39826|71596|197177|3259|13619|21|12|11|4|104|52|74|7.00|19.25|18.67|42.92|1381.58|518.00|1424.50|69.07|0.00|355.94|1381.58|1450.65|1737.52|1806.59|863.58| +2452301|48530|2452355|12798|80393|184753|1913|39826|71596|197177|3259|13619|31|24|11|2|53|52|67|73.74|85.53|16.25|4641.76|1088.75|4940.58|5730.51|65.32|0.00|1833.12|1088.75|1154.07|2921.87|2987.19|-3851.83| +2452301|48530|2452369|13914|80393|184753|1913|39826|71596|197177|3259|13619|36|7|20|1|255|52|80|34.04|69.44|8.33|4888.80|666.40|2723.20|5555.20|59.97|0.00|2166.40|666.40|726.37|2832.80|2892.77|-2056.80| +2452301|48530|2452346|1536|80393|184753|1913|39826|71596|197177|3259|13619|33|21|5|2|196|52|72|23.71|34.37|6.53|2004.48|470.16|1707.12|2474.64|42.31|0.00|172.80|470.16|512.47|642.96|685.27|-1236.96| +2452301|48530|2452351|3312|80393|184753|1913|39826|71596|197177|3259|13619|57|19|18|4|87|52|10|47.43|71.14|58.33|128.10|583.30|474.30|711.40|23.33|0.00|135.10|583.30|606.63|718.40|741.73|109.00| +2452301|48530|2452363|5139|80393|184753|1913|39826|71596|197177|3259|13619|37|9|1|2|293|52|40|33.47|68.94|20.68|1930.40|827.20|1338.80|2757.60|74.44|0.00|1158.00|827.20|901.64|1985.20|2059.64|-511.60| +2451691|45951|2451809|1417|54041|1071263|5732|49354|42172|1238469|3067|31889|32|17|14|1|98|53|48|39.94|113.82|48.94|3114.24|2349.12|1917.12|5463.36|46.98|0.00|2021.28|2349.12|2396.10|4370.40|4417.38|432.00| +2451691|45951|2451705|13189|54041|1071263|5732|49354|42172|1238469|3067|31889|13|1|15|4|103|53|35|20.49|37.49|23.61|485.80|826.35|717.15|1312.15|0.00|0.00|314.65|826.35|826.35|1141.00|1141.00|109.20| +2451691|45951|2451722|5285|54041|1071263|5732|49354|42172|1238469|3067|31889|29|25|4|5|46|53|36|73.45|164.52|105.29|2132.28|3790.44|2644.20|5922.72|54.58|2880.73|2013.48|909.71|964.29|2923.19|2977.77|-1734.49| +2451691|45951|2451777|14582|54041|1071263|5732|49354|42172|1238469|3067|31889|37|1|3|5|7|53|38|44.37|100.71|56.39|1684.16|2142.82|1686.06|3826.98|0.00|0.00|344.28|2142.82|2142.82|2487.10|2487.10|456.76| 
+2451691|45951|2451705|16154|54041|1071263|5732|49354|42172|1238469|3067|31889|37|26|12|2|142|53|97|55.62|140.71|46.43|9145.16|4503.71|5395.14|13648.87|0.00|0.00|5322.39|4503.71|4503.71|9826.10|9826.10|-891.43| +2451691|45951|2451703|14507|54041|1071263|5732|49354|42172|1238469|3067|31889|13|1|5|2|186|53|83|57.56|123.75|122.51|102.92|10168.33|4777.48|10271.25|711.78|0.00|3388.89|10168.33|10880.11|13557.22|14269.00|5390.85| +2451691|45951|2451719|8843|54041|1071263|5732|49354|42172|1238469|3067|31889|55|5|5|2|249|53|26|11.63|25.58|24.04|40.04|625.04|302.38|665.08|18.75|0.00|166.14|625.04|643.79|791.18|809.93|322.66| +2451691|45951|2451705|2435|54041|1071263|5732|49354|42172|1238469|3067|31889|35|11|7|3|189|53|38|5.89|13.84|3.46|394.44|131.48|223.82|525.92|2.76|39.44|73.34|92.04|94.80|165.38|168.14|-131.78| +2452594|39658|2452689|10080|17082|911361|4040|39519|83234|1635849|1956|19435|3|7|6|3|100|54|39|54.46|84.41|70.90|526.89|2765.10|2123.94|3291.99|221.20|0.00|296.01|2765.10|2986.30|3061.11|3282.31|641.16| +2452594|39658|2452638|15561|17082|911361|4040|39519|83234|1635849|1956|19435|49|7|16|3|107|54|18|80.04|226.51|38.50|3384.18|693.00|1440.72|4077.18|0.00|200.97|1263.78|492.03|492.03|1755.81|1755.81|-948.69| +2452594|39658|2452614|14373|17082|911361|4040|39519|83234|1635849|1956|19435|43|7|16|1|33|54|45|66.53|90.48|42.52|2158.20|1913.40|2993.85|4071.60|153.07|0.00|814.05|1913.40|2066.47|2727.45|2880.52|-1080.45| +2452594|39658|2452661|3228|17082|911361|4040|39519|83234|1635849|1956|19435|7|12|18|5|102|54|58|33.83|73.41|24.22|2853.02|1404.76|1962.14|4257.78|42.14|0.00|808.52|1404.76|1446.90|2213.28|2255.42|-557.38| +2452594|39658|2452670|14709|17082|911361|4040|39519|83234|1635849|1956|19435|12|27|12|5|38|54|85|94.67|131.59|65.79|5593.00|5592.15|8046.95|11185.15|167.76|0.00|4250.00|5592.15|5759.91|9842.15|10009.91|-2454.80| +2452594|39658|2452658|13281|17082|911361|4040|39519|83234|1635849|1956|19435|1|1|14|2|190|54|87|98.33|232.05|215.80|1413.75|18774.60|8554.71|20188.35|1284.18|4505.90|4239.51|14268.70|15552.88|18508.21|19792.39|5713.99| +2452594|39658|2452603|4905|17082|911361|4040|39519|83234|1635849|1956|19435|45|3|11|2|156|54|82|39.36|104.69|39.78|5322.62|3261.96|3227.52|8584.58|0.00|0.00|600.24|3261.96|3261.96|3862.20|3862.20|34.44| +2452594|39658|2452599|13983|17082|911361|4040|39519|83234|1635849|1956|19435|54|9|6|4|255|54|5|59.63|119.26|9.54|548.60|47.70|298.15|596.30|0.95|0.00|262.35|47.70|48.65|310.05|311.00|-250.45| +2452594|39658|2452666|15555|17082|911361|4040|39519|83234|1635849|1956|19435|21|24|20|3|149|54|77|44.08|52.45|47.20|404.25|3634.40|3394.16|4038.65|0.00|0.00|847.77|3634.40|3634.40|4482.17|4482.17|240.24| +2451788|53647|2451904|2129|9887|1273192|5650|4555|63252|1721892|5176|41681|37|17|6|3|270|55|3|72.56|214.77|133.15|244.86|399.45|217.68|644.31|31.95|0.00|122.40|399.45|431.40|521.85|553.80|181.77| +2451788|53647|2451791|17666|9887|1273192|5650|4555|63252|1721892|5176|41681|7|17|7|5|220|55|62|80.41|159.21|113.03|2863.16|7007.86|4985.42|9871.02|0.00|0.00|2862.54|7007.86|7007.86|9870.40|9870.40|2022.44| +2451788|53647|2451903|6559|9887|1273192|5650|4555|63252|1721892|5176|41681|49|19|15|5|235|55|28|50.96|137.08|5.48|3684.80|153.44|1426.88|3838.24|7.67|0.00|575.68|153.44|161.11|729.12|736.79|-1273.44| +2451788|53647|2451849|15277|9887|1273192|5650|4555|63252|1721892|5176|41681|19|13|4|1|21|55|19|28.96|77.61|23.28|1032.27|442.32|550.24|1474.59|8.84|0.00|265.24|442.32|451.16|707.56|716.40|-107.92| 
+2451788|53647|2451859|8378|9887|1273192|5650|4555|63252|1721892|5176|41681|14|19|2|4|136|55|57|70.89|192.82|179.32|769.50|10221.24|4040.73|10990.74|0.00|0.00|4176.39|10221.24|10221.24|14397.63|14397.63|6180.51| +2451788|53647|2451843|10471|9887|1273192|5650|4555|63252|1721892|5176|41681|55|19|9|3|7|55|100|80.00|94.40|19.82|7458.00|1982.00|8000.00|9440.00|59.46|0.00|1604.00|1982.00|2041.46|3586.00|3645.46|-6018.00| +2451788|53647|2451877|12053|9887|1273192|5650|4555|63252|1721892|5176|41681|44|11|19|2|82|55|72|33.05|56.51|44.07|895.68|3173.04|2379.60|4068.72|158.65|0.00|1667.52|3173.04|3331.69|4840.56|4999.21|793.44| +2451788|53647|2451898|5401|9887|1273192|5650|4555|63252|1721892|5176|41681|31|26|5|3|1|55|93|2.16|2.70|1.05|153.45|97.65|200.88|251.10|2.92|0.00|16.74|97.65|100.57|114.39|117.31|-103.23| +2451788|53647|2451876|16082|9887|1273192|5650|4555|63252|1721892|5176|41681|53|20|8|2|159|55|62|14.92|16.56|10.10|400.52|626.20|925.04|1026.72|43.83|0.00|451.36|626.20|670.03|1077.56|1121.39|-298.84| +2451788|53647|2451886|11165|9887|1273192|5650|4555|63252|1721892|5176|41681|53|13|17|5|150|55|96|74.35|179.18|173.80|516.48|16684.80|7137.60|17201.28|333.69|0.00|3783.36|16684.80|17018.49|20468.16|20801.85|9547.20| +2451788|53647|2451885|12013|9887|1273192|5650|4555|63252|1721892|5176|41681|50|11|9|4|59|55|26|96.74|155.75|10.90|3766.10|283.40|2515.24|4049.50|17.00|0.00|1538.68|283.40|300.40|1822.08|1839.08|-2231.84| +2451788|53647|2451858|17660|9887|1273192|5650|4555|63252|1721892|5176|41681|44|7|15|1|157|55|73|39.00|79.17|43.54|2600.99|3178.42|2847.00|5779.41|254.27|0.00|2715.60|3178.42|3432.69|5894.02|6148.29|331.42| +2451788|53647|2451866|6979|9887|1273192|5650|4555|63252|1721892|5176|41681|17|29|11|4|220|55|44|56.84|91.51|68.63|1006.72|3019.72|2500.96|4026.44|150.98|0.00|483.12|3019.72|3170.70|3502.84|3653.82|518.76| +2451788|53647|2451809|11509|9887|1273192|5650|4555|63252|1721892|5176|41681|8|14|11|4|294|55|94|93.40|243.77|63.38|16956.66|5957.72|8779.60|22914.38|59.57|0.00|8248.50|5957.72|6017.29|14206.22|14265.79|-2821.88| +2451788|53647|2451847|10085|9887|1273192|5650|4555|63252|1721892|5176|41681|14|26|3|4|235|55|35|54.20|81.30|7.31|2589.65|255.85|1897.00|2845.50|7.67|0.00|512.05|255.85|263.52|767.90|775.57|-1641.15| +2450832|77425|2450877|7783|91077|845709|341|42573|63692|506079|4916|45195|19|22|11|3|32|56|90|43.05|68.01|0.68|6059.70|61.20|3874.50|6120.90|1.22|0.00|1774.80|61.20|62.42|1836.00|1837.22|-3813.30| +2450832|77425|2450940|2336|91077|845709|341|42573|63692|506079|4916|45195|8|28|15|5|225|56|81|41.30|89.62|68.11|1742.31|5516.91|3345.30|7259.22|496.52|0.00|2032.29|5516.91|6013.43|7549.20|8045.72|2171.61| +2450832|77425|2450839|15643|91077|845709|341|42573|63692|506079|4916|45195|32|16|11|3|75|56|83|55.35|148.89|116.13|2719.08|9638.79|4594.05|12357.87|192.77|0.00|370.18|9638.79|9831.56|10008.97|10201.74|5044.74| +2450832|77425|2450917|16540|91077|845709|341|42573|63692|506079|4916|45195|14|13|6|5|34|56|57|60.02|158.45|134.68|1354.89|7676.76|3421.14|9031.65|460.60|0.00|541.50|7676.76|8137.36|8218.26|8678.86|4255.62| +2450832|77425|2450862|17467|91077|845709|341|42573|63692|506079|4916|45195|58|10|2|3|84|56|100|73.38|109.33|22.95|8638.00|2295.00|7338.00|10933.00|2.29|2249.10|1639.00|45.90|48.19|1684.90|1687.19|-7292.10| +2450832|77425|2450844|11918|91077|845709|341|42573|63692|506079|4916|45195|49|8|2|4|86|56|86|19.69|58.47|11.10|4073.82|954.60|1693.34|5028.42|85.91|0.00|2162.04|954.60|1040.51|3116.64|3202.55|-738.74| 
+2450832|77425|2450873|15670|91077|845709|341|42573|63692|506079|4916|45195|34|22|16|5|114|56|39|35.66|67.04|52.29|575.25|2039.31|1390.74|2614.56|40.78|0.00|522.60|2039.31|2080.09|2561.91|2602.69|648.57| +2450832|77425|2450937|2203|91077|845709|341|42573|63692|506079|4916|45195|25|13|1|2|296|56|42|94.50|120.01|63.60|2369.22|2671.20|3969.00|5040.42|45.67|2163.67|1360.80|507.53|553.20|1868.33|1914.00|-3461.47| +2450832|77425|2450881|748|91077|845709|341|42573|63692|506079|4916|45195|56|16|9|4|39|56|73|21.98|51.65|29.44|1621.33|2149.12|1604.54|3770.45|42.98|0.00|602.98|2149.12|2192.10|2752.10|2795.08|544.58| +2450832|77425|2450836|13480|91077|845709|341|42573|63692|506079|4916|45195|49|7|20|4|278|56|79|91.52|175.71|144.08|2498.77|11382.32|7230.08|13881.09|0.00|0.00|1248.99|11382.32|11382.32|12631.31|12631.31|4152.24| +2451251|30422|2451275|985|15972|27168|411|4807|80022|1239567|4842|45685|58|8|10|5|20|57|53|96.07|221.92|199.72|1176.60|10585.16|5091.71|11761.76|635.10|0.00|3645.87|10585.16|11220.26|14231.03|14866.13|5493.45| +2451251|30422|2451360|1765|15972|27168|411|4807|80022|1239567|4842|45685|10|13|8|4|250|57|23|12.07|15.32|14.40|21.16|331.20|277.61|352.36|6.62|0.00|155.02|331.20|337.82|486.22|492.84|53.59| +2451251|30422|2451320|17870|15972|27168|411|4807|80022|1239567|4842|45685|32|10|5|1|10|57|62|84.90|141.78|32.60|6769.16|2021.20|5263.80|8790.36|121.27|0.00|438.96|2021.20|2142.47|2460.16|2581.43|-3242.60| +2451251|30422|2451264|13990|15972|27168|411|4807|80022|1239567|4842|45685|28|22|2|4|289|57|51|96.36|102.14|77.62|1250.52|3958.62|4914.36|5209.14|106.88|2177.24|52.02|1781.38|1888.26|1833.40|1940.28|-3132.98| +2451251|30422|2451267|4466|15972|27168|411|4807|80022|1239567|4842|45685|44|14|10|5|150|57|40|39.22|114.91|57.45|2298.40|2298.00|1568.80|4596.40|68.94|0.00|1516.80|2298.00|2366.94|3814.80|3883.74|729.20| +2451251|30422|2451312|7999|15972|27168|411|4807|80022|1239567|4842|45685|34|13|16|5|274|57|56|70.51|198.83|105.37|5233.76|5900.72|3948.56|11134.48|354.04|0.00|445.20|5900.72|6254.76|6345.92|6699.96|1952.16| +2451251|30422|2451278|7358|15972|27168|411|4807|80022|1239567|4842|45685|32|7|11|3|175|57|14|57.41|99.31|82.42|236.46|1153.88|803.74|1390.34|0.00|0.00|625.52|1153.88|1153.88|1779.40|1779.40|350.14| +2451251|30422|2451335|9877|15972|27168|411|4807|80022|1239567|4842|45685|20|28|19|3|117|57|16|35.92|41.30|33.45|125.60|535.20|574.72|660.80|32.11|0.00|92.48|535.20|567.31|627.68|659.79|-39.52| +2451251|30422|2451330|11149|15972|27168|411|4807|80022|1239567|4842|45685|10|26|5|1|227|57|97|95.63|234.29|72.62|15681.99|7044.14|9276.11|22726.13|493.08|0.00|3862.54|7044.14|7537.22|10906.68|11399.76|-2231.97| +2451284|24184|2451287|1759|18917|614640|763|48039|54232|918925|6761|48084|1|19|6|5|141|58|74|20.42|33.89|7.11|1981.72|526.14|1511.08|2507.86|1.57|499.83|877.64|26.31|27.88|903.95|905.52|-1484.77| +2451284|24184|2451344|11342|18917|614640|763|48039|54232|918925|6761|48084|13|7|11|3|32|58|36|97.13|177.74|127.97|1791.72|4606.92|3496.68|6398.64|0.00|1520.28|2879.28|3086.64|3086.64|5965.92|5965.92|-410.04| +2451284|24184|2451386|6380|18917|614640|763|48039|54232|918925|6761|48084|56|28|1|2|153|58|1|19.35|35.60|7.83|27.77|7.83|19.35|35.60|0.07|0.00|10.68|7.83|7.90|18.51|18.58|-11.52| +2451284|24184|2451291|4376|18917|614640|763|48039|54232|918925|6761|48084|44|8|17|4|184|58|47|4.16|9.48|0.28|432.40|13.16|195.52|445.56|0.35|4.34|111.39|8.82|9.17|120.21|120.56|-186.70| 
+2451284|24184|2451319|17689|18917|614640|763|48039|54232|918925|6761|48084|26|14|2|1|163|58|57|64.09|66.65|55.98|608.19|3190.86|3653.13|3799.05|159.54|0.00|531.81|3190.86|3350.40|3722.67|3882.21|-462.27| +2451284|24184|2451291|2144|18917|614640|763|48039|54232|918925|6761|48084|20|20|4|2|260|58|40|73.71|114.25|82.26|1279.60|3290.40|2948.40|4570.00|98.71|0.00|319.60|3290.40|3389.11|3610.00|3708.71|342.00| +2451284|24184|2451403|5528|18917|614640|763|48039|54232|918925|6761|48084|52|4|19|2|208|58|26|89.45|222.73|184.86|984.62|4806.36|2325.70|5790.98|144.19|0.00|0.00|4806.36|4950.55|4806.36|4950.55|2480.66| +2451284|24184|2451403|13064|18917|614640|763|48039|54232|918925|6761|48084|26|7|17|2|20|58|13|72.56|203.89|73.40|1696.37|954.20|943.28|2650.57|47.71|0.00|795.08|954.20|1001.91|1749.28|1796.99|10.92| +2451284|24184|2451368|12380|18917|614640|763|48039|54232|918925|6761|48084|14|8|2|4|183|58|27|33.73|99.50|31.84|1826.82|859.68|910.71|2686.50|68.77|0.00|1262.52|859.68|928.45|2122.20|2190.97|-51.03| +2451284|24184|2451292|8167|18917|614640|763|48039|54232|918925|6761|48084|26|28|3|1|219|58|43|46.62|105.36|94.82|453.22|4077.26|2004.66|4530.48|203.86|0.00|45.15|4077.26|4281.12|4122.41|4326.27|2072.60| +2451284|24184|2451309|6302|18917|614640|763|48039|54232|918925|6761|48084|55|26|10|1|25|58|31|19.14|47.85|0.00|1483.35|0.00|593.34|1483.35|0.00|0.00|533.82|0.00|0.00|533.82|533.82|-593.34| +2451284|24184|2451346|3937|18917|614640|763|48039|54232|918925|6761|48084|10|8|4|5|159|58|30|65.83|117.17|9.37|3234.00|281.10|1974.90|3515.10|1.34|146.17|1405.80|134.93|136.27|1540.73|1542.07|-1839.97| +2451284|24184|2451350|17437|18917|614640|763|48039|54232|918925|6761|48084|58|1|12|3|124|58|62|4.19|5.40|1.94|214.52|120.28|259.78|334.80|7.21|0.00|156.86|120.28|127.49|277.14|284.35|-139.50| +2451484|62929|2451509|17162|33521|1812017|2960|29903|54903|853546|4685|35981|38|19|7|2|234|59|97|72.15|98.12|53.96|4283.52|5234.12|6998.55|9517.64|209.36|0.00|2474.47|5234.12|5443.48|7708.59|7917.95|-1764.43| +2451484|62929|2451596|14096|33521|1812017|2960|29903|54903|853546|4685|35981|38|26|8|4|93|59|72|30.97|39.95|30.36|690.48|2185.92|2229.84|2876.40|196.73|0.00|229.68|2185.92|2382.65|2415.60|2612.33|-43.92| +2451484|62929|2451572|13633|33521|1812017|2960|29903|54903|853546|4685|35981|34|14|8|3|38|59|30|26.19|41.64|36.64|150.00|1099.20|785.70|1249.20|87.93|0.00|224.70|1099.20|1187.13|1323.90|1411.83|313.50| +2451484|62929|2451582|1316|33521|1812017|2960|29903|54903|853546|4685|35981|50|8|17|1|168|59|12|92.35|121.90|81.67|482.76|980.04|1108.20|1462.80|78.40|0.00|614.28|980.04|1058.44|1594.32|1672.72|-128.16| +2451484|62929|2451563|13132|33521|1812017|2960|29903|54903|853546|4685|35981|32|4|2|3|157|59|88|85.97|132.39|38.39|8272.00|3378.32|7565.36|11650.32|135.13|0.00|2213.20|3378.32|3513.45|5591.52|5726.65|-4187.04| +2451484|62929|2451568|10658|33521|1812017|2960|29903|54903|853546|4685|35981|43|1|7|1|102|59|70|82.70|138.10|26.23|7830.90|1836.10|5789.00|9667.00|73.44|0.00|1159.90|1836.10|1909.54|2996.00|3069.44|-3952.90| +2451484|62929|2451531|1057|33521|1812017|2960|29903|54903|853546|4685|35981|25|14|6|1|136|59|53|38.90|114.75|97.53|912.66|5169.09|2061.70|6081.75|310.14|0.00|2249.85|5169.09|5479.23|7418.94|7729.08|3107.39| +2451484|62929|2451604|5918|33521|1812017|2960|29903|54903|853546|4685|35981|43|22|12|5|185|59|28|56.48|109.57|97.51|337.68|2730.28|1581.44|3067.96|191.11|0.00|920.36|2730.28|2921.39|3650.64|3841.75|1148.84| 
+2451484|62929|2451589|4420|33521|1812017|2960|29903|54903|853546|4685|35981|22|20|20|4|128|59|54|8.92|25.42|18.30|384.48|988.20|481.68|1372.68|19.76|0.00|178.20|988.20|1007.96|1166.40|1186.16|506.52| +2451484|62929|2451522|16933|33521|1812017|2960|29903|54903|853546|4685|35981|34|7|10|1|292|59|81|47.85|121.06|38.73|6668.73|3137.13|3875.85|9805.86|237.16|501.94|882.09|2635.19|2872.35|3517.28|3754.44|-1240.66| +2451060|34923|2451146|8629|4759|535841|6570|49652|1574|810653|3345|15274|40|14|12|4|73|60|42|6.80|17.34|14.56|116.76|611.52|285.60|728.28|36.69|0.00|57.96|611.52|648.21|669.48|706.17|325.92| +2451060|34923|2451151|1736|4759|535841|6570|49652|1574|810653|3345|15274|40|26|7|5|53|60|66|75.14|103.69|67.39|2395.80|4447.74|4959.24|6843.54|88.95|0.00|67.98|4447.74|4536.69|4515.72|4604.67|-511.50| +2451060|34923|2451068|6412|4759|535841|6570|49652|1574|810653|3345|15274|38|14|13|1|46|60|27|34.07|93.35|1.86|2470.23|50.22|919.89|2520.45|3.51|0.00|1083.78|50.22|53.73|1134.00|1137.51|-869.67| +2451060|34923|2451100|16339|4759|535841|6570|49652|1574|810653|3345|15274|7|14|2|3|207|60|99|15.08|17.34|14.91|240.57|1476.09|1492.92|1716.66|0.00|0.00|858.33|1476.09|1476.09|2334.42|2334.42|-16.83| +2451060|34923|2451110|1556|4759|535841|6570|49652|1574|810653|3345|15274|58|19|10|5|227|60|95|8.34|10.67|4.05|628.90|384.75|792.30|1013.65|23.08|0.00|384.75|384.75|407.83|769.50|792.58|-407.55| +2451060|34923|2451100|14683|4759|535841|6570|49652|1574|810653|3345|15274|16|20|16|3|280|60|18|26.23|71.34|23.54|860.40|423.72|472.14|1284.12|24.78|148.30|269.64|275.42|300.20|545.06|569.84|-196.72| +2451060|34923|2451098|5642|4759|535841|6570|49652|1574|810653|3345|15274|46|14|9|5|131|60|86|11.15|28.09|8.98|1643.46|772.28|958.90|2415.74|15.44|0.00|0.00|772.28|787.72|772.28|787.72|-186.62| +2451060|34923|2451101|10288|4759|535841|6570|49652|1574|810653|3345|15274|25|16|2|5|101|60|71|93.32|154.91|117.73|2639.78|8358.83|6625.72|10998.61|488.99|2925.59|2859.17|5433.24|5922.23|8292.41|8781.40|-1192.48| +2451060|34923|2451180|211|4759|535841|6570|49652|1574|810653|3345|15274|22|13|12|5|105|60|47|74.89|179.73|120.41|2788.04|5659.27|3519.83|8447.31|169.77|0.00|2787.57|5659.27|5829.04|8446.84|8616.61|2139.44| +2450821|49678|2450839|4424|85218|575823|828|14233|85218|575823|828|14233|10|22|1|2|46|61|8|58.27|117.70|1.17|932.24|9.36|466.16|941.60|0.30|1.68|423.68|7.68|7.98|431.36|431.66|-458.48| +2450821|49678|2450861|2311|85218|575823|828|14233|85218|575823|828|14233|8|20|17|1|52|61|3|41.26|49.51|14.85|103.98|44.55|123.78|148.53|3.56|0.00|60.87|44.55|48.11|105.42|108.98|-79.23| +2450821|49678|2450896|15494|85218|575823|828|14233|85218|575823|828|14233|43|25|14|5|106|61|10|64.43|169.45|138.94|305.10|1389.40|644.30|1694.50|0.00|0.00|474.40|1389.40|1389.40|1863.80|1863.80|745.10| +2450821|49678|2450858|8002|85218|575823|828|14233|85218|575823|828|14233|25|8|4|3|122|61|48|30.57|49.21|16.73|1559.04|803.04|1467.36|2362.08|48.90|104.39|519.36|698.65|747.55|1218.01|1266.91|-768.71| +2450821|49678|2450849|16477|85218|575823|828|14233|85218|575823|828|14233|1|26|17|5|53|61|50|41.35|124.05|96.75|1365.00|4837.50|2067.50|6202.50|387.00|0.00|1984.50|4837.50|5224.50|6822.00|7209.00|2770.00| +2450821|49678|2450904|10138|85218|575823|828|14233|85218|575823|828|14233|10|8|4|1|55|61|65|95.74|256.58|141.11|7505.55|9172.15|6223.10|16677.70|275.16|0.00|166.40|9172.15|9447.31|9338.55|9613.71|2949.05| 
+2450821|49678|2450839|8452|85218|575823|828|14233|85218|575823|828|14233|32|16|18|2|119|61|21|49.67|67.05|9.38|1211.07|196.98|1043.07|1408.05|5.90|0.00|563.22|196.98|202.88|760.20|766.10|-846.09| +2450821|49678|2450880|3343|85218|575823|828|14233|85218|575823|828|14233|50|19|14|5|119|61|32|42.65|96.38|79.99|524.48|2559.68|1364.80|3084.16|230.37|0.00|215.68|2559.68|2790.05|2775.36|3005.73|1194.88| +2450821|49678|2450859|17122|85218|575823|828|14233|85218|575823|828|14233|31|22|1|2|16|61|99|19.42|49.52|43.57|589.05|4313.43|1922.58|4902.48|86.26|0.00|1470.15|4313.43|4399.69|5783.58|5869.84|2390.85| +2450821|49678|2450924|13783|85218|575823|828|14233|85218|575823|828|14233|10|28|4|3|102|61|47|12.51|21.51|8.60|606.77|404.20|587.97|1010.97|16.16|0.00|90.71|404.20|420.36|494.91|511.07|-183.77| +2450821|49678|2450912|10184|85218|575823|828|14233|85218|575823|828|14233|20|16|7|3|206|61|67|61.72|135.16|112.18|1539.66|7516.06|4135.24|9055.72|526.12|0.00|3622.02|7516.06|8042.18|11138.08|11664.20|3380.82| +2450821|49678|2450935|2155|85218|575823|828|14233|85218|575823|828|14233|52|19|16|4|42|61|96|79.59|227.62|34.14|18574.08|3277.44|7640.64|21851.52|0.00|0.00|10925.76|3277.44|3277.44|14203.20|14203.20|-4363.20| +2450821|49678|2450861|5764|85218|575823|828|14233|85218|575823|828|14233|10|7|2|5|261|61|24|73.07|99.37|32.79|1597.92|786.96|1753.68|2384.88|31.47|0.00|786.96|786.96|818.43|1573.92|1605.39|-966.72| +2451505|60774|2451610|14443|85517|1119630|496|14209|56648|1762228|2101|47014|8|7|13|2|181|62|48|73.14|182.11|100.16|3933.60|4807.68|3510.72|8741.28|384.61|0.00|524.16|4807.68|5192.29|5331.84|5716.45|1296.96| +2451505|60774|2451597|9254|85517|1119630|496|14209|56648|1762228|2101|47014|8|16|13|2|231|62|68|19.89|27.64|23.77|263.16|1616.36|1352.52|1879.52|145.47|0.00|883.32|1616.36|1761.83|2499.68|2645.15|263.84| +2451505|60774|2451524|16075|85517|1119630|496|14209|56648|1762228|2101|47014|10|25|10|4|101|62|52|71.03|166.92|23.36|7465.12|1214.72|3693.56|8679.84|0.00|0.00|1909.44|1214.72|1214.72|3124.16|3124.16|-2478.84| +2451505|60774|2451517|1474|85517|1119630|496|14209|56648|1762228|2101|47014|31|22|18|1|277|62|27|15.73|42.62|32.81|264.87|885.87|424.71|1150.74|26.57|0.00|402.57|885.87|912.44|1288.44|1315.01|461.16| +2451505|60774|2451597|3532|85517|1119630|496|14209|56648|1762228|2101|47014|34|16|10|1|162|62|82|6.88|18.98|3.22|1292.32|264.04|564.16|1556.36|5.28|0.00|77.08|264.04|269.32|341.12|346.40|-300.12| +2451505|60774|2451620|16036|85517|1119630|496|14209|56648|1762228|2101|47014|1|16|10|3|86|62|35|37.25|76.73|42.96|1181.95|1503.60|1303.75|2685.55|75.18|0.00|590.80|1503.60|1578.78|2094.40|2169.58|199.85| +2451505|60774|2451517|6940|85517|1119630|496|14209|56648|1762228|2101|47014|56|4|7|1|15|62|5|36.93|55.02|31.91|115.55|159.55|184.65|275.10|0.00|0.00|112.75|159.55|159.55|272.30|272.30|-25.10| +2451505|60774|2451596|8252|85517|1119630|496|14209|56648|1762228|2101|47014|38|10|15|1|47|62|94|63.72|159.93|94.35|6164.52|8868.90|5989.68|15033.42|46.11|4257.07|5561.98|4611.83|4657.94|10173.81|10219.92|-1377.85| +2451500|66633|2451550|1702|53134|14005|4476|20147|38730|1354568|5113|9184|13|2|17|2|257|63|69|60.11|88.36|6.18|5670.42|426.42|4147.59|6096.84|8.52|0.00|1097.10|426.42|434.94|1523.52|1532.04|-3721.17| +2451500|66633|2451531|12037|53134|14005|4476|20147|38730|1354568|5113|9184|26|1|8|5|292|63|77|72.59|213.41|93.90|9202.27|7230.30|5589.43|16432.57|274.75|1735.27|5093.55|5495.03|5769.78|10588.58|10863.33|-94.40| 
+2451500|66633|2451577|7369|53134|14005|4476|20147|38730|1354568|5113|9184|32|14|9|5|281|63|30|31.03|43.44|38.22|156.60|1146.60|930.90|1303.20|8.48|722.35|364.80|424.25|432.73|789.05|797.53|-506.65| +2451500|66633|2451592|7378|53134|14005|4476|20147|38730|1354568|5113|9184|46|2|20|1|139|63|21|82.15|210.30|159.82|1060.08|3356.22|1725.15|4416.30|201.37|0.00|1589.70|3356.22|3557.59|4945.92|5147.29|1631.07| +2451500|66633|2451603|6536|53134|14005|4476|20147|38730|1354568|5113|9184|55|14|5|5|142|63|31|43.29|115.58|40.45|2329.03|1253.95|1341.99|3582.98|32.35|714.75|644.80|539.20|571.55|1184.00|1216.35|-802.79| +2451500|66633|2451616|15130|53134|14005|4476|20147|38730|1354568|5113|9184|26|19|16|4|5|63|53|63.95|171.38|128.53|2271.05|6812.09|3389.35|9083.14|272.48|0.00|1271.47|6812.09|7084.57|8083.56|8356.04|3422.74| +2451500|66633|2451577|14287|53134|14005|4476|20147|38730|1354568|5113|9184|46|16|4|4|114|63|68|46.08|98.61|46.34|3554.36|3151.12|3133.44|6705.48|126.04|0.00|2145.40|3151.12|3277.16|5296.52|5422.56|17.68| +2451500|66633|2451597|799|53134|14005|4476|20147|38730|1354568|5113|9184|55|2|9|1|270|63|80|92.96|242.62|50.95|15333.60|4076.00|7436.80|19409.60|326.08|0.00|7957.60|4076.00|4402.08|12033.60|12359.68|-3360.80| +2451500|66633|2451582|16402|53134|14005|4476|20147|38730|1354568|5113|9184|31|20|16|3|243|63|66|43.62|88.11|51.10|2442.66|3372.60|2878.92|5815.26|0.00|0.00|1860.54|3372.60|3372.60|5233.14|5233.14|493.68| +2451500|66633|2451511|15430|53134|14005|4476|20147|38730|1354568|5113|9184|52|22|17|2|140|63|14|91.59|262.86|202.40|846.44|2833.60|1282.26|3680.04|170.01|0.00|1508.78|2833.60|3003.61|4342.38|4512.39|1551.34| +2451500|66633|2451517|2630|53134|14005|4476|20147|38730|1354568|5113|9184|50|26|5|3|61|63|50|16.33|42.78|39.35|171.50|1967.50|816.50|2139.00|108.01|767.32|342.00|1200.18|1308.19|1542.18|1650.19|383.68| +2451500|66633|2451503|4636|53134|14005|4476|20147|38730|1354568|5113|9184|1|1|18|3|8|63|84|37.17|97.01|77.60|1630.44|6518.40|3122.28|8148.84|391.10|0.00|407.40|6518.40|6909.50|6925.80|7316.90|3396.12| +2451073|44810|2451090|3319|782|1915675|4968|10214|77544|1088240|5246|17339|34|25|18|3|95|64|17|13.85|15.23|1.67|230.52|28.39|235.45|258.91|1.70|0.00|67.15|28.39|30.09|95.54|97.24|-207.06| +2451073|44810|2451158|14062|782|1915675|4968|10214|77544|1088240|5246|17339|43|10|20|4|228|64|80|26.74|58.02|30.17|2228.00|2413.60|2139.20|4641.60|72.40|0.00|2273.60|2413.60|2486.00|4687.20|4759.60|274.40| +2451073|44810|2451178|4930|782|1915675|4968|10214|77544|1088240|5246|17339|46|10|8|3|65|64|87|48.02|129.65|50.56|6880.83|4398.72|4177.74|11279.55|87.97|0.00|2029.71|4398.72|4486.69|6428.43|6516.40|220.98| +2451073|44810|2451140|5008|782|1915675|4968|10214|77544|1088240|5246|17339|10|20|6|4|19|64|51|38.61|44.40|22.64|1109.76|1154.64|1969.11|2264.40|46.18|0.00|814.98|1154.64|1200.82|1969.62|2015.80|-814.47| +2451073|44810|2451176|7285|782|1915675|4968|10214|77544|1088240|5246|17339|19|28|10|5|300|64|96|44.34|91.34|71.24|1929.60|6839.04|4256.64|8768.64|478.73|0.00|3506.88|6839.04|7317.77|10345.92|10824.65|2582.40| +2451073|44810|2451171|10772|782|1915675|4968|10214|77544|1088240|5246|17339|43|7|3|5|47|64|27|16.39|22.94|7.79|409.05|210.33|442.53|619.38|2.10|0.00|129.87|210.33|212.43|340.20|342.30|-232.20| +2451073|44810|2451122|238|782|1915675|4968|10214|77544|1088240|5246|17339|14|10|4|2|120|64|76|23.26|47.91|33.05|1129.36|2511.80|1767.76|3641.16|75.35|0.00|728.08|2511.80|2587.15|3239.88|3315.23|744.04| 
+2451073|44810|2451131|14348|782|1915675|4968|10214|77544|1088240|5246|17339|25|8|4|3|115|64|31|41.17|87.69|14.90|2256.49|461.90|1276.27|2718.39|12.56|147.80|815.30|314.10|326.66|1129.40|1141.96|-962.17| +2451072|31719|2451182|12742|26772|378122|5919|6069|88160|1267011|1719|33916|28|22|12|4|176|65|59|14.71|17.94|0.53|1027.19|31.27|867.89|1058.46|0.93|0.00|73.75|31.27|32.20|105.02|105.95|-836.62| +2451072|31719|2451108|14242|26772|378122|5919|6069|88160|1267011|1719|33916|28|28|15|3|188|65|57|65.79|185.52|89.04|5499.36|5075.28|3750.03|10574.64|101.50|0.00|3701.01|5075.28|5176.78|8776.29|8877.79|1325.25| +2451072|31719|2451088|604|26772|378122|5919|6069|88160|1267011|1719|33916|16|10|13|5|20|65|66|60.18|158.27|14.24|9505.98|939.84|3971.88|10445.82|56.39|0.00|312.84|939.84|996.23|1252.68|1309.07|-3032.04| +2451072|31719|2451154|7054|26772|378122|5919|6069|88160|1267011|1719|33916|16|2|8|4|170|65|16|1.79|5.10|2.24|45.76|35.84|28.64|81.60|1.43|0.00|40.80|35.84|37.27|76.64|78.07|7.20| +2451072|31719|2451121|2317|26772|378122|5919|6069|88160|1267011|1719|33916|31|10|16|1|36|65|7|28.33|54.39|11.96|297.01|83.72|198.31|380.73|0.07|76.18|41.86|7.54|7.61|49.40|49.47|-190.77| +2451072|31719|2451156|5776|26772|378122|5919|6069|88160|1267011|1719|33916|55|4|18|3|205|65|67|29.45|55.36|7.19|3227.39|481.73|1973.15|3709.12|28.90|0.00|259.29|481.73|510.63|741.02|769.92|-1491.42| +2451072|31719|2451075|12526|26772|378122|5919|6069|88160|1267011|1719|33916|19|7|18|2|46|65|64|10.38|25.22|4.03|1356.16|257.92|664.32|1614.08|1.70|87.69|807.04|170.23|171.93|977.27|978.97|-494.09| +2451072|31719|2451174|12316|26772|378122|5919|6069|88160|1267011|1719|33916|44|20|2|3|231|65|58|28.90|61.55|44.93|963.96|2605.94|1676.20|3569.90|234.53|0.00|749.36|2605.94|2840.47|3355.30|3589.83|929.74| +2451072|31719|2451082|1952|26772|378122|5919|6069|88160|1267011|1719|33916|8|2|12|4|129|65|40|93.48|193.50|158.67|1393.20|6346.80|3739.20|7740.00|444.27|0.00|2322.00|6346.80|6791.07|8668.80|9113.07|2607.60| +2451072|31719|2451185|12106|26772|378122|5919|6069|88160|1267011|1719|33916|16|2|12|2|55|65|96|40.38|105.39|66.39|3744.00|6373.44|3876.48|10117.44|0.00|0.00|404.16|6373.44|6373.44|6777.60|6777.60|2496.96| +2451072|31719|2451118|5893|26772|378122|5919|6069|88160|1267011|1719|33916|52|14|6|2|78|65|68|62.77|134.95|9.44|8534.68|641.92|4268.36|9176.60|38.51|0.00|2660.84|641.92|680.43|3302.76|3341.27|-3626.44| +2451072|31719|2451088|13250|26772|378122|5919|6069|88160|1267011|1719|33916|25|4|10|5|190|65|63|80.84|164.10|19.69|9097.83|1240.47|5092.92|10338.30|0.00|0.00|4341.96|1240.47|1240.47|5582.43|5582.43|-3852.45| +2451334|82725|2451344|11893|97747|570993|2639|24285|28739|188604|6823|26022|50|20|16|1|116|66|24|19.98|43.35|0.86|1019.76|20.64|479.52|1040.40|1.85|0.00|478.56|20.64|22.49|499.20|501.05|-458.88| +2451334|82725|2451381|1832|97747|570993|2639|24285|28739|188604|6823|26022|10|1|6|1|249|66|27|49.18|121.47|40.08|2197.53|1082.16|1327.86|3279.69|26.62|638.47|787.05|443.69|470.31|1230.74|1257.36|-884.17| +2451334|82725|2451356|17270|97747|570993|2639|24285|28739|188604|6823|26022|26|1|18|3|195|66|60|30.05|61.30|52.71|515.40|3162.60|1803.00|3678.00|0.00|0.00|882.60|3162.60|3162.60|4045.20|4045.20|1359.60| +2451334|82725|2451346|17582|97747|570993|2639|24285|28739|188604|6823|26022|7|20|5|2|196|66|7|2.47|4.59|0.78|26.67|5.46|17.29|32.13|0.05|2.83|7.00|2.63|2.68|9.63|9.68|-14.66| 
+2451334|82725|2451414|17662|97747|570993|2639|24285|28739|188604|6823|26022|37|22|8|1|44|66|42|74.77|198.14|160.49|1581.30|6740.58|3140.34|8321.88|5.39|6470.95|1331.40|269.63|275.02|1601.03|1606.42|-2870.71| +2451334|82725|2451454|3098|97747|570993|2639|24285|28739|188604|6823|26022|49|1|10|1|35|66|94|17.90|51.19|50.67|48.88|4762.98|1682.60|4811.86|428.66|0.00|529.22|4762.98|5191.64|5292.20|5720.86|3080.38| +2451334|82725|2451412|4201|97747|570993|2639|24285|28739|188604|6823|26022|19|26|15|4|70|66|73|39.86|40.25|28.17|881.84|2056.41|2909.78|2938.25|0.00|0.00|1145.37|2056.41|2056.41|3201.78|3201.78|-853.37| +2451334|82725|2451437|715|97747|570993|2639|24285|28739|188604|6823|26022|10|16|12|5|18|66|91|32.27|40.66|15.85|2257.71|1442.35|2936.57|3700.06|13.55|86.54|665.21|1355.81|1369.36|2021.02|2034.57|-1580.76| +2451334|82725|2451353|2410|97747|570993|2639|24285|28739|188604|6823|26022|37|7|19|4|199|66|81|23.28|67.27|65.92|109.35|5339.52|1885.68|5448.87|427.16|0.00|1089.45|5339.52|5766.68|6428.97|6856.13|3453.84| +2451334|82725|2451384|5395|97747|570993|2639|24285|28739|188604|6823|26022|7|2|3|4|35|66|47|74.21|110.57|105.04|259.91|4936.88|3487.87|5196.79|246.84|0.00|1558.99|4936.88|5183.72|6495.87|6742.71|1449.01| +2451334|82725|2451409|13192|97747|570993|2639|24285|28739|188604|6823|26022|1|19|9|3|7|66|15|29.03|63.86|5.74|871.80|86.10|435.45|957.90|4.30|0.00|316.05|86.10|90.40|402.15|406.45|-349.35| +2451334|82725|2451355|2180|97747|570993|2639|24285|28739|188604|6823|26022|44|20|2|5|268|66|89|7.96|9.63|7.41|197.58|659.49|708.44|857.07|6.59|0.00|33.82|659.49|666.08|693.31|699.90|-48.95| +2451871|52842|2451877|5653|862|752153|4691|20829|1777|1782422|3196|25334|53|11|15|4|280|67|35|18.04|33.55|14.09|681.10|493.15|631.40|1174.25|4.93|0.00|457.80|493.15|498.08|950.95|955.88|-138.25| +2451871|52842|2451902|16922|862|752153|4691|20829|1777|1782422|3196|25334|29|11|17|5|46|67|11|62.05|84.38|2.53|900.35|27.83|682.55|928.18|0.00|0.00|389.73|27.83|27.83|417.56|417.56|-654.72| +2451871|52842|2451885|5545|862|752153|4691|20829|1777|1782422|3196|25334|26|14|2|1|290|67|47|20.16|38.30|9.95|1332.45|467.65|947.52|1800.10|9.35|0.00|647.66|467.65|477.00|1115.31|1124.66|-479.87| +2451871|52842|2451950|12067|862|752153|4691|20829|1777|1782422|3196|25334|19|7|8|4|23|67|18|19.92|59.36|40.36|342.00|726.48|358.56|1068.48|5.81|653.83|256.32|72.65|78.46|328.97|334.78|-285.91| +2451871|52842|2451955|10259|862|752153|4691|20829|1777|1782422|3196|25334|41|20|11|1|71|67|38|88.90|149.35|116.49|1248.68|4426.62|3378.20|5675.30|88.53|0.00|1816.02|4426.62|4515.15|6242.64|6331.17|1048.42| +2451871|52842|2451968|7339|862|752153|4691|20829|1777|1782422|3196|25334|8|29|8|3|1|67|36|67.81|84.76|78.82|213.84|2837.52|2441.16|3051.36|170.25|0.00|1281.24|2837.52|3007.77|4118.76|4289.01|396.36| +2451871|52842|2451891|4406|862|752153|4691|20829|1777|1782422|3196|25334|7|25|2|1|160|67|11|75.10|190.75|162.13|314.82|1783.43|826.10|2098.25|0.00|0.00|860.20|1783.43|1783.43|2643.63|2643.63|957.33| +2451871|52842|2451953|10022|862|752153|4691|20829|1777|1782422|3196|25334|26|1|18|3|225|67|68|76.76|98.25|93.33|334.56|6346.44|5219.68|6681.00|441.71|825.03|2337.84|5521.41|5963.12|7859.25|8300.96|301.73| +2451871|52842|2451881|9539|862|752153|4691|20829|1777|1782422|3196|25334|14|14|18|5|15|67|67|55.03|106.20|6.37|6688.61|426.79|3687.01|7115.40|21.33|0.00|995.62|426.79|448.12|1422.41|1443.74|-3260.22| 
+2451871|52842|2451907|12617|862|752153|4691|20829|1777|1782422|3196|25334|37|5|17|1|225|67|81|91.76|128.46|110.47|1457.19|8948.07|7432.56|10405.26|243.38|2863.38|4786.29|6084.69|6328.07|10870.98|11114.36|-1347.87| +2451871|52842|2451966|6175|862|752153|4691|20829|1777|1782422|3196|25334|31|5|20|4|241|67|10|26.54|31.58|6.94|246.40|69.40|265.40|315.80|0.69|0.00|60.00|69.40|70.09|129.40|130.09|-196.00| +2451871|52842|2451898|4814|862|752153|4691|20829|1777|1782422|3196|25334|59|19|20|5|40|67|16|44.60|119.97|37.19|1324.48|595.04|713.60|1919.52|11.90|0.00|844.48|595.04|606.94|1439.52|1451.42|-118.56| +2451871|52842|2451900|4322|862|752153|4691|20829|1777|1782422|3196|25334|55|2|13|4|88|67|10|24.47|49.67|49.67|0.00|496.70|244.70|496.70|0.00|0.00|4.90|496.70|496.70|501.60|501.60|252.00| +2451871|52842|2451883|15929|862|752153|4691|20829|1777|1782422|3196|25334|56|29|11|2|183|67|35|99.45|124.31|88.26|1261.75|3089.10|3480.75|4350.85|278.01|0.00|1696.80|3089.10|3367.11|4785.90|5063.91|-391.65| +2451871|52842|2451989|8369|862|752153|4691|20829|1777|1782422|3196|25334|31|17|9|2|152|67|5|97.24|216.84|86.73|650.55|433.65|486.20|1084.20|17.34|0.00|520.40|433.65|450.99|954.05|971.39|-52.55| +2451111|7790|2451145|16916|34219|956323|827|44131|96141|526889|2843|7347|56|7|16|1|26|68|9|36.11|81.60|38.35|389.25|345.15|324.99|734.40|20.70|0.00|58.68|345.15|365.85|403.83|424.53|20.16| +2451111|7790|2451168|15382|34219|956323|827|44131|96141|526889|2843|7347|31|7|9|4|162|68|3|59.37|143.08|123.04|60.12|369.12|178.11|429.24|8.08|99.66|154.50|269.46|277.54|423.96|432.04|91.35| +2451111|7790|2451148|3664|34219|956323|827|44131|96141|526889|2843|7347|38|19|10|5|190|68|43|4.78|11.28|8.00|141.04|344.00|205.54|485.04|20.64|0.00|154.80|344.00|364.64|498.80|519.44|138.46| +2451111|7790|2451144|13448|34219|956323|827|44131|96141|526889|2843|7347|44|13|1|5|259|68|18|44.75|115.00|57.50|1035.00|1035.00|805.50|2070.00|72.45|0.00|496.80|1035.00|1107.45|1531.80|1604.25|229.50| +2451111|7790|2451219|17134|34219|956323|827|44131|96141|526889|2843|7347|16|7|12|5|155|68|90|35.34|100.01|2.00|8820.90|180.00|3180.60|9000.90|10.80|0.00|2790.00|180.00|190.80|2970.00|2980.80|-3000.60| +2451111|7790|2451209|6670|34219|956323|827|44131|96141|526889|2843|7347|55|22|6|5|84|68|4|38.27|61.23|60.61|2.48|242.44|153.08|244.92|0.24|237.59|41.60|4.85|5.09|46.45|46.69|-148.23| +2451111|7790|2451197|283|34219|956323|827|44131|96141|526889|2843|7347|4|4|13|3|250|68|31|66.00|143.88|58.99|2631.59|1828.69|2046.00|4460.28|164.58|0.00|1070.43|1828.69|1993.27|2899.12|3063.70|-217.31| +2451111|7790|2451203|13963|34219|956323|827|44131|96141|526889|2843|7347|20|8|12|1|220|68|37|35.05|101.29|87.10|525.03|3222.70|1296.85|3747.73|0.00|0.00|74.74|3222.70|3222.70|3297.44|3297.44|1925.85| +2451111|7790|2451220|3607|34219|956323|827|44131|96141|526889|2843|7347|43|1|7|3|109|68|72|55.53|126.05|1.26|8984.88|90.72|3998.16|9075.60|1.81|0.00|181.44|90.72|92.53|272.16|273.97|-3907.44| +2451111|7790|2451195|908|34219|956323|827|44131|96141|526889|2843|7347|28|25|4|2|203|68|90|66.09|138.78|29.14|9867.60|2622.60|5948.10|12490.20|26.22|0.00|2747.70|2622.60|2648.82|5370.30|5396.52|-3325.50| +2451111|7790|2451119|11198|34219|956323|827|44131|96141|526889|2843|7347|38|8|17|2|64|68|22|45.23|117.14|114.79|51.70|2525.38|995.06|2577.08|101.01|0.00|1004.96|2525.38|2626.39|3530.34|3631.35|1530.32| 
+2451111|7790|2451171|4136|34219|956323|827|44131|96141|526889|2843|7347|16|16|19|4|129|68|84|25.23|66.60|33.30|2797.20|2797.20|2119.32|5594.40|139.86|0.00|614.88|2797.20|2937.06|3412.08|3551.94|677.88| +2451111|7790|2451114|5320|34219|956323|827|44131|96141|526889|2843|7347|58|2|14|1|188|68|69|71.68|177.76|110.21|4660.95|7604.49|4945.92|12265.44|182.50|5323.14|3556.95|2281.35|2463.85|5838.30|6020.80|-2664.57| +2451111|7790|2451160|6368|34219|956323|827|44131|96141|526889|2843|7347|20|28|6|5|281|68|38|5.19|7.57|2.72|184.30|103.36|197.22|287.66|1.03|0.00|68.78|103.36|104.39|172.14|173.17|-93.86| +2451887|75024|2451965|11261|64487|1617286|3238|36037|38806|1840690|5987|25986|19|11|15|1|250|69|64|55.44|99.23|45.64|3429.76|2920.96|3548.16|6350.72|175.25|0.00|888.96|2920.96|3096.21|3809.92|3985.17|-627.20| +2451887|75024|2451937|14857|64487|1617286|3238|36037|38806|1840690|5987|25986|47|2|20|5|183|69|67|44.05|123.34|13.56|7355.26|908.52|2951.35|8263.78|36.34|0.00|1487.40|908.52|944.86|2395.92|2432.26|-2042.83| +2451887|75024|2451917|17383|64487|1617286|3238|36037|38806|1840690|5987|25986|2|2|2|5|295|69|38|61.16|101.52|89.33|463.22|3394.54|2324.08|3857.76|4.07|3292.70|1465.66|101.84|105.91|1567.50|1571.57|-2222.24| +2451887|75024|2451965|53|64487|1617286|3238|36037|38806|1840690|5987|25986|1|23|3|4|135|69|76|16.82|25.90|20.72|393.68|1574.72|1278.32|1968.40|15.74|0.00|98.04|1574.72|1590.46|1672.76|1688.50|296.40| +2451887|75024|2451908|9902|64487|1617286|3238|36037|38806|1840690|5987|25986|55|19|12|1|113|69|95|37.28|65.98|36.28|2821.50|3446.60|3541.60|6268.10|21.36|2378.15|1190.35|1068.45|1089.81|2258.80|2280.16|-2473.15| +2451887|75024|2451892|4103|64487|1617286|3238|36037|38806|1840690|5987|25986|23|1|3|5|277|69|54|41.12|80.18|7.21|3940.38|389.34|2220.48|4329.72|0.00|0.00|1428.30|389.34|389.34|1817.64|1817.64|-1831.14| +2451887|75024|2451954|15599|64487|1617286|3238|36037|38806|1840690|5987|25986|55|1|4|4|77|69|27|35.53|83.85|47.79|973.62|1290.33|959.31|2263.95|64.51|0.00|837.54|1290.33|1354.84|2127.87|2192.38|331.02| +2451887|75024|2451896|8509|64487|1617286|3238|36037|38806|1840690|5987|25986|29|25|8|3|268|69|22|22.84|63.72|59.89|84.26|1317.58|502.48|1401.84|92.23|0.00|42.02|1317.58|1409.81|1359.60|1451.83|815.10| +2451887|75024|2451954|13148|64487|1617286|3238|36037|38806|1840690|5987|25986|19|2|4|1|68|69|30|57.10|91.36|82.22|274.20|2466.60|1713.00|2740.80|172.66|0.00|794.70|2466.60|2639.26|3261.30|3433.96|753.60| +2451887|75024|2451975|17030|64487|1617286|3238|36037|38806|1840690|5987|25986|32|23|20|5|230|69|73|89.79|154.43|105.01|3607.66|7665.73|6554.67|11273.39|459.94|0.00|2141.82|7665.73|8125.67|9807.55|10267.49|1111.06| +2451973|69074|2451989|13799|18177|860787|2597|32760|84347|736031|4943|13106|51|29|17|1|215|70|42|5.71|16.67|13.33|140.28|559.86|239.82|700.14|0.00|67.18|168.00|492.68|492.68|660.68|660.68|252.86| +2451973|69074|2452044|7633|18177|860787|2597|32760|84347|736031|4943|13106|25|19|7|2|264|70|2|69.25|165.50|152.26|26.48|304.52|138.50|331.00|15.22|0.00|6.62|304.52|319.74|311.14|326.36|166.02| +2451973|69074|2451986|7721|18177|860787|2597|32760|84347|736031|4943|13106|1|7|20|3|179|70|27|74.91|140.83|70.41|1901.34|1901.07|2022.57|3802.41|171.09|0.00|684.18|1901.07|2072.16|2585.25|2756.34|-121.50| +2451973|69074|2452085|10441|18177|860787|2597|32760|84347|736031|4943|13106|57|27|19|4|149|70|13|79.90|120.64|119.43|15.73|1552.59|1038.70|1568.32|62.10|0.00|219.44|1552.59|1614.69|1772.03|1834.13|513.89| 
+2451973|69074|2452020|2911|18177|860787|2597|32760|84347|736031|4943|13106|21|29|9|5|184|70|8|78.72|109.42|93.00|131.36|744.00|629.76|875.36|14.88|0.00|175.04|744.00|758.88|919.04|933.92|114.24| +2451973|69074|2451980|9355|18177|860787|2597|32760|84347|736031|4943|13106|21|25|5|2|266|70|2|12.04|31.18|11.84|38.68|23.68|24.08|62.36|0.71|0.00|23.68|23.68|24.39|47.36|48.07|-0.40| +2451973|69074|2452062|1819|18177|860787|2597|32760|84347|736031|4943|13106|47|11|11|3|268|70|36|4.70|13.63|0.00|490.68|0.00|169.20|490.68|0.00|0.00|73.44|0.00|0.00|73.44|73.44|-169.20| +2451973|69074|2451998|8945|18177|860787|2597|32760|84347|736031|4943|13106|45|17|8|2|73|70|86|13.92|16.70|1.16|1336.44|99.76|1197.12|1436.20|0.00|0.00|617.48|99.76|99.76|717.24|717.24|-1097.36| +2451973|69074|2452053|6473|18177|860787|2597|32760|84347|736031|4943|13106|59|1|12|3|244|70|23|63.23|186.52|93.26|2144.98|2144.98|1454.29|4289.96|128.69|0.00|1158.28|2144.98|2273.67|3303.26|3431.95|690.69| +2451973|69074|2452038|3261|18177|860787|2597|32760|84347|736031|4943|13106|29|19|5|5|235|70|81|26.34|47.67|0.00|3861.27|0.00|2133.54|3861.27|0.00|0.00|463.32|0.00|0.00|463.32|463.32|-2133.54| +2451973|69074|2452013|4767|18177|860787|2597|32760|84347|736031|4943|13106|25|15|13|1|76|70|82|7.62|10.13|3.24|564.98|265.68|624.84|830.66|1.75|90.33|165.64|175.35|177.10|340.99|342.74|-449.49| +2451348|41853|2451456|16196|48452|1550645|1991|43784|15572|1372301|6669|48462|40|13|13|4|55|71|16|67.06|103.94|86.27|282.72|1380.32|1072.96|1663.04|0.00|0.00|332.48|1380.32|1380.32|1712.80|1712.80|307.36| +2451348|41853|2451452|8570|48452|1550645|1991|43784|15572|1372301|6669|48462|2|8|11|4|300|71|28|82.63|111.55|24.54|2436.28|687.12|2313.64|3123.40|54.96|0.00|437.08|687.12|742.08|1124.20|1179.16|-1626.52| +2451348|41853|2451444|10940|48452|1550645|1991|43784|15572|1372301|6669|48462|2|10|10|4|258|71|20|62.98|64.23|35.96|565.40|719.20|1259.60|1284.60|21.57|0.00|603.60|719.20|740.77|1322.80|1344.37|-540.40| +2451348|41853|2451438|8035|48452|1550645|1991|43784|15572|1372301|6669|48462|22|16|3|2|284|71|27|6.48|6.93|0.06|185.49|1.62|174.96|187.11|0.08|0.00|11.07|1.62|1.70|12.69|12.77|-173.34| +2451348|41853|2451468|4918|48452|1550645|1991|43784|15572|1372301|6669|48462|46|2|13|4|245|71|70|62.89|96.85|49.39|3322.20|3457.30|4402.30|6779.50|103.71|0.00|1355.90|3457.30|3561.01|4813.20|4916.91|-945.00| +2451348|41853|2451378|14281|48452|1550645|1991|43784|15572|1372301|6669|48462|44|26|15|4|40|71|70|65.38|179.79|145.62|2391.90|10193.40|4576.60|12585.30|0.00|0.00|628.60|10193.40|10193.40|10822.00|10822.00|5616.80| +2451348|41853|2451446|10939|48452|1550645|1991|43784|15572|1372301|6669|48462|8|1|17|3|229|71|9|99.54|287.67|198.49|802.62|1786.41|895.86|2589.03|7.68|1018.25|1268.55|768.16|775.84|2036.71|2044.39|-127.70| +2451348|41853|2451396|14474|48452|1550645|1991|43784|15572|1372301|6669|48462|56|19|1|4|230|71|14|36.18|41.24|31.34|138.60|438.76|506.52|577.36|30.71|0.00|63.42|438.76|469.47|502.18|532.89|-67.76| +2451348|41853|2451371|13408|48452|1550645|1991|43784|15572|1372301|6669|48462|28|2|12|4|129|71|14|36.63|105.49|50.63|768.04|708.82|512.82|1476.86|21.26|0.00|472.50|708.82|730.08|1181.32|1202.58|196.00| +2451348|41853|2451436|2527|48452|1550645|1991|43784|15572|1372301|6669|48462|32|16|11|3|278|71|85|70.71|178.89|114.48|5474.85|9730.80|6010.35|15205.65|326.95|1556.92|1064.20|8173.88|8500.83|9238.08|9565.03|2163.53| 
+2451348|41853|2451423|11995|48452|1550645|1991|43784|15572|1372301|6669|48462|46|20|15|2|46|71|45|31.55|93.70|46.85|2108.25|2108.25|1419.75|4216.50|84.33|0.00|843.30|2108.25|2192.58|2951.55|3035.88|688.50|
+2451348|41853|2451359|2012|48452|1550645|1991|43784|15572|1372301|6669|48462|32|16|18|2|6|71|35|8.79|10.54|5.58|173.60|195.30|307.65|368.90|7.81|0.00|84.70|195.30|203.11|280.00|287.81|-112.35|
+2451348|41853|2451354|11714|48452|1550645|1991|43784|15572|1372301|6669|48462|43|13|12|5|255|71|53|52.37|101.59|28.44|3876.95|1507.32|2775.61|5384.27|120.58|0.00|968.84|1507.32|1627.90|2476.16|2596.74|-1268.29|
+2452129|70279|2452216|6577|90064|644490|6116|46095|90064|644490|6116|46095|51|19|7|1|105|72|23|69.02|195.32|187.50|179.86|4312.50|1587.46|4492.36|4.31|4226.25|1931.54|86.25|90.56|2017.79|2022.10|-1501.21|
+2452129|70279|2452223|6831|90064|644490|6116|46095|90064|644490|6116|46095|47|21|18|5|2|72|100|95.29|141.02|66.27|7475.00|6627.00|9529.00|14102.00|596.43|0.00|282.00|6627.00|7223.43|6909.00|7505.43|-2902.00|
+2452129|70279|2452235|5997|90064|644490|6116|46095|90064|644490|6116|46095|13|19|3|3|116|72|6|31.86|75.50|53.60|131.40|321.60|191.16|453.00|25.72|0.00|95.10|321.60|347.32|416.70|442.42|130.44|
+2452129|70279|2452192|8669|90064|644490|6116|46095|90064|644490|6116|46095|37|1|13|5|277|72|52|34.99|74.17|57.11|887.12|2969.72|1819.48|3856.84|237.57|0.00|1426.88|2969.72|3207.29|4396.60|4634.17|1150.24|
+2452129|70279|2452187|599|90064|644490|6116|46095|90064|644490|6116|46095|39|25|19|2|128|72|43|69.38|165.12|51.18|4899.42|2200.74|2983.34|7100.16|198.06|0.00|1703.66|2200.74|2398.80|3904.40|4102.46|-782.60|
+2452129|70279|2452178|7013|90064|644490|6116|46095|90064|644490|6116|46095|57|25|20|2|214|72|14|42.79|50.92|36.15|206.78|506.10|599.06|712.88|20.95|156.89|85.54|349.21|370.16|434.75|455.70|-249.85|
+2452129|70279|2452131|2405|90064|644490|6116|46095|90064|644490|6116|46095|57|13|5|3|271|72|38|21.33|46.92|2.34|1694.04|88.92|810.54|1782.96|0.88|0.00|320.72|88.92|89.80|409.64|410.52|-721.62|
+2452129|70279|2452141|12447|90064|644490|6116|46095|90064|644490|6116|46095|45|1|9|2|182|72|10|6.46|19.12|14.14|49.80|141.40|64.60|191.20|5.65|0.00|66.90|141.40|147.05|208.30|213.95|76.80|
+2452129|70279|2452249|2677|90064|644490|6116|46095|90064|644490|6116|46095|55|7|7|3|149|72|82|66.14|190.48|102.85|7185.66|8433.70|5423.48|15619.36|337.34|0.00|7497.26|8433.70|8771.04|15930.96|16268.30|3010.22|
+2452228|66853|2452245|2337|92090|1415581|3519|18210|49682|1303843|330|41821|23|27|3|5|57|73|51|80.92|216.05|183.64|1652.91|9365.64|4126.92|11018.55|655.59|0.00|4296.75|9365.64|10021.23|13662.39|14317.98|5238.72|
+2452228|66853|2452335|15643|92090|1415581|3519|18210|49682|1303843|330|41821|15|9|19|1|153|73|53|3.08|8.90|7.20|90.10|381.60|163.24|471.70|26.71|0.00|112.89|381.60|408.31|494.49|521.20|218.36|
+2452228|66853|2452312|16541|92090|1415581|3519|18210|49682|1303843|330|41821|41|3|3|4|275|73|68|69.65|160.89|130.32|2078.76|8861.76|4736.20|10940.52|177.23|0.00|2734.96|8861.76|9038.99|11596.72|11773.95|4125.56|
+2452228|66853|2452268|17467|92090|1415581|3519|18210|49682|1303843|330|41821|23|9|5|3|88|73|10|6.80|17.88|17.70|1.80|177.00|68.00|178.80|12.39|0.00|3.50|177.00|189.39|180.50|192.89|109.00|
+2452228|66853|2452283|11919|92090|1415581|3519|18210|49682|1303843|330|41821|49|23|16|5|174|73|37|74.08|108.15|70.29|1400.82|2600.73|2740.96|4001.55|52.01|0.00|79.92|2600.73|2652.74|2680.65|2732.66|-140.23|
+2452228|66853|2452316|15671|92090|1415581|3519|18210|49682|1303843|330|41821|29|7|3|2|31|73|12|55.64|153.56|52.21|1216.20|626.52|667.68|1842.72|0.00|0.00|55.20|626.52|626.52|681.72|681.72|-41.16|
+2452228|66853|2452237|2203|92090|1415581|3519|18210|49682|1303843|330|41821|3|9|18|1|254|73|91|56.53|137.93|53.79|7656.74|4894.89|5144.23|12551.63|293.69|0.00|2007.46|4894.89|5188.58|6902.35|7196.04|-249.34|
+2452228|66853|2452321|749|92090|1415581|3519|18210|49682|1303843|330|41821|51|27|2|3|271|73|65|28.20|31.30|18.46|834.60|1199.90|1833.00|2034.50|35.99|0.00|81.25|1199.90|1235.89|1281.15|1317.14|-633.10|
+2452228|66853|2452320|13481|92090|1415581|3519|18210|49682|1303843|330|41821|13|27|14|1|201|73|15|51.67|88.35|13.25|1126.50|198.75|775.05|1325.25|0.00|0.00|119.25|198.75|198.75|318.00|318.00|-576.30|
+2452562|82677|2452639|14736|2199|133121|4043|37765|3979|20382|4978|35500|45|1|19|2|258|74|24|91.85|194.72|118.77|1822.80|2850.48|2204.40|4673.28|171.02|0.00|1308.48|2850.48|3021.50|4158.96|4329.98|646.08|
+2452562|82677|2452614|16410|2199|133121|4043|37765|3979|20382|4978|35500|57|9|18|4|150|74|9|28.82|72.33|20.97|462.24|188.73|259.38|650.97|16.98|0.00|123.66|188.73|205.71|312.39|329.37|-70.65|
+2452562|82677|2452623|17329|2199|133121|4043|37765|3979|20382|4978|35500|9|3|2|2|212|74|55|37.45|60.66|2.42|3203.20|133.10|2059.75|3336.30|10.64|0.00|700.15|133.10|143.74|833.25|843.89|-1926.65|
+2452562|82677|2452623|14535|2199|133121|4043|37765|3979|20382|4978|35500|60|21|5|1|37|74|95|65.05|152.86|105.47|4502.05|10019.65|6179.75|14521.70|400.78|0.00|4501.10|10019.65|10420.43|14520.75|14921.53|3839.90|
+2452562|82677|2452636|17859|2199|133121|4043|37765|3979|20382|4978|35500|36|12|7|3|69|74|14|30.69|57.69|29.42|395.78|411.88|429.66|807.66|1.44|267.72|104.86|144.16|145.60|249.02|250.46|-285.50|
+2452562|82677|2452602|463|2199|133121|4043|37765|3979|20382|4978|35500|42|6|8|3|265|74|7|33.43|61.51|60.27|8.68|421.89|234.01|430.57|33.75|0.00|25.83|421.89|455.64|447.72|481.47|187.88|
+2452562|82677|2452610|4521|2199|133121|4043|37765|3979|20382|4978|35500|57|19|8|4|140|74|93|89.92|267.06|197.62|6457.92|18378.66|8362.56|24836.58|1286.50|0.00|6456.99|18378.66|19665.16|24835.65|26122.15|10016.10|
+2452562|82677|2452650|13027|2199|133121|4043|37765|3979|20382|4978|35500|7|6|15|4|83|74|93|57.52|95.48|79.24|1510.32|7369.32|5349.36|8879.64|221.07|0.00|1597.74|7369.32|7590.39|8967.06|9188.13|2019.96|
+2452562|82677|2452592|4476|2199|133121|4043|37765|3979|20382|4978|35500|21|13|3|4|85|74|69|42.30|64.71|20.06|3080.85|1384.14|2918.70|4464.99|55.36|0.00|1160.58|1384.14|1439.50|2544.72|2600.08|-1534.56|
+2452497|48409|2452592|15625|32727|170314|1054|28641|45134|1247619|5708|34095|3|1|10|4|148|75|28|28.67|73.68|26.52|1320.48|742.56|802.76|2063.04|14.85|0.00|267.96|742.56|757.41|1010.52|1025.37|-60.20|
+2452497|48409|2452617|15531|32727|170314|1054|28641|45134|1247619|5708|34095|45|9|17|4|41|75|1|69.82|203.87|112.12|91.75|112.12|69.82|203.87|4.48|0.00|16.30|112.12|116.60|128.42|132.90|42.30|
+2452497|48409|2452609|9804|32727|170314|1054|28641|45134|1247619|5708|34095|19|6|16|1|79|75|66|45.59|85.70|40.27|2998.38|2657.82|3008.94|5656.20|79.73|0.00|2036.10|2657.82|2737.55|4693.92|4773.65|-351.12|
+2452497|48409|2452523|4812|32727|170314|1054|28641|45134|1247619|5708|34095|45|1|16|4|2|75|69|94.01|175.79|82.62|6428.73|5700.78|6486.69|12129.51|228.03|0.00|2304.60|5700.78|5928.81|8005.38|8233.41|-785.91|
+2452497|48409|2452507|8593|32727|170314|1054|28641|45134|1247619|5708|34095|51|21|16|1|259|75|63|73.12|131.61|92.12|2487.87|5803.56|4606.56|8291.43|174.10|0.00|0.00|5803.56|5977.66|5803.56|5977.66|1197.00|
+2452497|48409|2452556|7383|32727|170314|1054|28641|45134|1247619|5708|34095|60|15|19|5|198|75|5|41.65|61.64|31.43|151.05|157.15|208.25|308.20|1.57|0.00|86.25|157.15|158.72|243.40|244.97|-51.10|
+2452497|48409|2452535|1533|32727|170314|1054|28641|45134|1247619|5708|34095|57|15|14|3|42|75|84|33.23|79.41|76.23|267.12|6403.32|2791.32|6670.44|192.09|0.00|1600.20|6403.32|6595.41|8003.52|8195.61|3612.00|
+2452497|48409|2452513|5143|32727|170314|1054|28641|45134|1247619|5708|34095|55|9|5|2|139|75|85|67.81|85.44|78.60|581.40|6681.00|5763.85|7262.40|0.00|2872.83|144.50|3808.17|3808.17|3952.67|3952.67|-1955.68|
+2452497|48409|2452548|9108|32727|170314|1054|28641|45134|1247619|5708|34095|25|24|11|3|272|75|84|17.84|28.72|26.42|193.20|2219.28|1498.56|2412.48|177.54|0.00|1157.52|2219.28|2396.82|3376.80|3554.34|720.72|
+2451713|63167|2451791|2120|39792|761829|2619|44256|64598|42136|273|11110|14|7|10|1|22|76|27|2.88|8.29|5.63|71.82|152.01|77.76|223.83|9.12|0.00|89.37|152.01|161.13|241.38|250.50|74.25|
+2451713|63167|2451768|10111|39792|761829|2619|44256|64598|42136|273|11110|25|17|15|1|154|76|47|45.92|56.02|44.81|526.87|2106.07|2158.24|2632.94|42.12|0.00|894.88|2106.07|2148.19|3000.95|3043.07|-52.17|
+2451713|63167|2451769|17725|39792|761829|2619|44256|64598|42136|273|11110|31|23|8|4|120|76|3|95.48|220.55|134.53|258.06|403.59|286.44|661.65|20.17|0.00|165.39|403.59|423.76|568.98|589.15|117.15|
+2451713|63167|2451736|10034|39792|761829|2619|44256|64598|42136|273|11110|44|2|11|5|239|76|95|44.80|51.07|24.51|2523.20|2328.45|4256.00|4851.65|23.28|0.00|2036.80|2328.45|2351.73|4365.25|4388.53|-1927.55|
+2451713|63167|2451772|7556|39792|761829|2619|44256|64598|42136|273|11110|53|23|14|4|11|76|88|58.87|148.94|1.48|12976.48|130.24|5180.56|13106.72|6.51|0.00|5242.16|130.24|136.75|5372.40|5378.91|-5050.32|
+2451713|63167|2451728|9931|39792|761829|2619|44256|64598|42136|273|11110|32|20|8|2|9|76|35|41.56|44.88|16.15|1005.55|565.25|1454.60|1570.80|39.56|0.00|251.30|565.25|604.81|816.55|856.11|-889.35|
+2451713|63167|2451785|3104|39792|761829|2619|44256|64598|42136|273|11110|26|20|8|3|295|76|6|65.00|68.25|51.87|98.28|311.22|390.00|409.50|12.44|0.00|114.66|311.22|323.66|425.88|438.32|-78.78|
+2451713|63167|2451804|10039|39792|761829|2619|44256|64598|42136|273|11110|19|13|7|4|51|76|85|80.56|170.78|35.86|11468.20|3048.10|6847.60|14516.30|91.44|0.00|1741.65|3048.10|3139.54|4789.75|4881.19|-3799.50|
+2451415|58912|2451496|5114|21966|1889753|3140|18927|22618|1818536|4688|14875|1|14|5|2|59|77|81|23.54|27.54|1.10|2141.64|89.10|1906.74|2230.74|2.67|0.00|780.03|89.10|91.77|869.13|871.80|-1817.64|
+2451415|58912|2451489|11374|21966|1889753|3140|18927|22618|1818536|4688|14875|34|28|15|5|195|77|99|10.71|17.99|2.15|1568.16|212.85|1060.29|1781.01|14.89|0.00|462.33|212.85|227.74|675.18|690.07|-847.44|
+2451415|58912|2451485|14284|21966|1889753|3140|18927|22618|1818536|4688|14875|43|20|10|2|151|77|19|39.97|52.76|3.16|942.40|60.04|759.43|1002.44|1.80|0.00|240.54|60.04|61.84|300.58|302.38|-699.39|
+2451415|58912|2451524|7711|21966|1889753|3140|18927|22618|1818536|4688|14875|46|13|3|1|272|77|86|29.63|37.63|36.50|97.18|3139.00|2548.18|3236.18|251.12|0.00|420.54|3139.00|3390.12|3559.54|3810.66|590.82|
+2451415|58912|2451535|8974|21966|1889753|3140|18927|22618|1818536|4688|14875|14|7|3|4|158|77|22|6.18|18.10|10.31|171.38|226.82|135.96|398.20|13.60|0.00|55.66|226.82|240.42|282.48|296.08|90.86|
+2451415|58912|2451479|13370|21966|1889753|3140|18927|22618|1818536|4688|14875|13|20|5|4|293|77|44|96.31|276.40|38.69|10459.24|1702.36|4237.64|12161.60|85.11|0.00|4986.08|1702.36|1787.47|6688.44|6773.55|-2535.28|
+2451415|58912|2451490|13900|21966|1889753|3140|18927|22618|1818536|4688|14875|32|1|12|1|87|77|45|2.78|3.36|0.70|119.70|31.50|125.10|151.20|1.89|0.00|10.35|31.50|33.39|41.85|43.74|-93.60|
+2451415|58912|2451509|2014|21966|1889753|3140|18927|22618|1818536|4688|14875|46|2|10|5|213|77|11|71.24|213.00|176.79|398.31|1944.69|783.64|2343.00|155.57|0.00|984.06|1944.69|2100.26|2928.75|3084.32|1161.05|
+2451415|58912|2451531|16210|21966|1889753|3140|18927|22618|1818536|4688|14875|37|19|1|2|216|77|12|99.79|237.50|232.75|57.00|2793.00|1197.48|2850.00|195.51|0.00|826.44|2793.00|2988.51|3619.44|3814.95|1595.52|
+2451415|58912|2451437|16700|21966|1889753|3140|18927|22618|1818536|4688|14875|31|14|20|4|111|77|39|3.73|5.52|3.80|67.08|148.20|145.47|215.28|2.96|0.00|88.14|148.20|151.16|236.34|239.30|2.73|
+2451415|58912|2451454|4490|21966|1889753|3140|18927|22618|1818536|4688|14875|7|10|18|2|259|77|89|30.01|33.01|29.04|353.33|2584.56|2670.89|2937.89|25.84|0.00|646.14|2584.56|2610.40|3230.70|3256.54|-86.33|
+2451415|58912|2451478|13081|21966|1889753|3140|18927|22618|1818536|4688|14875|22|4|14|3|239|77|76|78.34|219.35|24.12|14837.48|1833.12|5953.84|16670.60|54.99|0.00|2666.84|1833.12|1888.11|4499.96|4554.95|-4120.72|
+2451415|58912|2451434|14827|21966|1889753|3140|18927|22618|1818536|4688|14875|28|25|1|4|37|77|82|25.18|33.99|16.65|1421.88|1365.30|2064.76|2787.18|95.57|0.00|1281.66|1365.30|1460.87|2646.96|2742.53|-699.46|
+2451415|58912|2451433|6271|21966|1889753|3140|18927|22618|1818536|4688|14875|44|2|8|2|133|77|37|71.18|170.12|134.39|1322.01|4972.43|2633.66|6294.44|99.44|0.00|2580.38|4972.43|5071.87|7552.81|7652.25|2338.77|
+2451415|58912|2451432|8536|21966|1889753|3140|18927|22618|1818536|4688|14875|37|1|13|3|60|77|82|62.59|78.23|63.36|1219.34|5195.52|5132.38|6414.86|0.00|0.00|833.12|5195.52|5195.52|6028.64|6028.64|63.14|
+2452521|43762|2452588|14743|45554|22294|3481|21421|54274|489532|4135|45322|31|6|7|1|225|78|79|64.22|151.55|25.76|9937.41|2035.04|5073.38|11972.45|142.45|0.00|4668.90|2035.04|2177.49|6703.94|6846.39|-3038.34|
+2452521|43762|2452533|13944|45554|22294|3481|21421|54274|489532|4135|45322|42|18|7|3|26|78|37|97.45|277.73|272.17|205.72|10070.29|3605.65|10276.01|402.81|0.00|2157.84|10070.29|10473.10|12228.13|12630.94|6464.64|
+2452521|43762|2452572|12499|45554|22294|3481|21421|54274|489532|4135|45322|49|30|12|2|232|78|93|80.37|111.71|82.66|2701.65|7687.38|7474.41|10389.03|0.00|0.00|3843.69|7687.38|7687.38|11531.07|11531.07|212.97|
+2452521|43762|2452581|14359|45554|22294|3481|21421|54274|489532|4135|45322|3|21|13|2|84|78|63|93.71|236.14|226.69|595.35|14281.47|5903.73|14876.82|856.88|0.00|4164.93|14281.47|15138.35|18446.40|19303.28|8377.74|
+2452521|43762|2452523|7284|45554|22294|3481|21421|54274|489532|4135|45322|31|7|12|5|35|78|14|70.49|143.79|4.31|1952.72|60.34|986.86|2013.06|0.00|0.00|140.84|60.34|60.34|201.18|201.18|-926.52|
+2452521|43762|2452561|17269|45554|22294|3481|21421|54274|489532|4135|45322|3|1|13|2|171|78|22|58.15|126.18|105.99|444.18|2331.78|1279.30|2775.96|93.27|0.00|860.42|2331.78|2425.05|3192.20|3285.47|1052.48|
+2452521|43762|2452538|1917|45554|22294|3481|21421|54274|489532|4135|45322|18|1|10|2|141|78|100|43.66|55.44|40.47|1497.00|4047.00|4366.00|5544.00|21.85|2954.31|1275.00|1092.69|1114.54|2367.69|2389.54|-3273.31|
+2452521|43762|2452564|1554|45554|22294|3481|21421|54274|489532|4135|45322|48|30|19|3|4|78|69|11.85|31.40|26.37|347.07|1819.53|817.65|2166.60|37.84|873.37|563.04|946.16|984.00|1509.20|1547.04|128.51|
+2452521|43762|2452597|11617|45554|22294|3481|21421|54274|489532|4135|45322|21|1|18|3|8|78|22|52.48|94.46|23.61|1558.70|519.42|1154.56|2078.12|10.38|0.00|124.52|519.42|529.80|643.94|654.32|-635.14|
+2452521|43762|2452617|4495|45554|22294|3481|21421|54274|489532|4135|45322|1|19|2|1|203|78|72|72.49|138.45|34.61|7476.48|2491.92|5219.28|9968.40|0.00|0.00|2391.84|2491.92|2491.92|4883.76|4883.76|-2727.36|
+2452521|43762|2452534|10669|45554|22294|3481|21421|54274|489532|4135|45322|30|24|9|4|236|78|77|40.98|86.05|63.67|1723.26|4902.59|3155.46|6625.85|392.20|0.00|662.20|4902.59|5294.79|5564.79|5956.99|1747.13|
+2452521|43762|2452526|15780|45554|22294|3481|21421|54274|489532|4135|45322|21|13|3|2|288|78|83|64.71|110.00|38.50|5934.50|3195.50|5370.93|9130.00|63.91|0.00|2647.70|3195.50|3259.41|5843.20|5907.11|-2175.43|
+2452521|43762|2452580|17544|45554|22294|3481|21421|54274|489532|4135|45322|54|25|2|3|227|78|26|66.54|135.07|109.40|667.42|2844.40|1730.04|3511.82|113.77|0.00|1545.18|2844.40|2958.17|4389.58|4503.35|1114.36|
+2451535|77199|2451618|17995|81942|173188|4787|46833|81942|173188|4787|46833|58|28|10|5|270|79|72|67.23|201.01|116.58|6078.96|8393.76|4840.56|14472.72|587.56|0.00|6078.24|8393.76|8981.32|14472.00|15059.56|3553.20|
+2451535|77199|2451581|2270|81942|173188|4787|46833|81942|173188|4787|46833|34|20|11|4|184|79|80|31.13|75.33|33.89|3315.20|2711.20|2490.40|6026.40|46.36|2196.07|360.80|515.13|561.49|875.93|922.29|-1975.27|
+2451535|77199|2451624|1574|81942|173188|4787|46833|81942|173188|4787|46833|55|4|8|1|131|79|24|23.39|68.76|24.06|1072.80|577.44|561.36|1650.24|34.64|0.00|330.00|577.44|612.08|907.44|942.08|16.08|
+2451535|77199|2451590|8932|81942|173188|4787|46833|81942|173188|4787|46833|7|1|11|3|195|79|21|31.41|78.21|43.79|722.82|919.59|659.61|1642.41|18.39|0.00|738.99|919.59|937.98|1658.58|1676.97|259.98|
+2451535|77199|2451541|6610|81942|173188|4787|46833|81942|173188|4787|46833|22|20|20|4|188|79|15|25.21|41.59|28.69|193.50|430.35|378.15|623.85|0.00|0.00|162.15|430.35|430.35|592.50|592.50|52.20|
+2451535|77199|2451609|7684|81942|173188|4787|46833|81942|173188|4787|46833|26|20|18|5|62|79|35|36.43|42.25|27.04|532.35|946.40|1275.05|1478.75|10.22|690.87|428.75|255.53|265.75|684.28|694.50|-1019.52|
+2451535|77199|2451592|2456|81942|173188|4787|46833|81942|173188|4787|46833|55|14|18|3|195|79|10|28.97|57.36|0.57|567.90|5.70|289.70|573.60|0.28|0.00|57.30|5.70|5.98|63.00|63.28|-284.00|
+2451535|77199|2451562|16660|81942|173188|4787|46833|81942|173188|4787|46833|1|13|16|5|214|79|70|12.83|22.45|20.87|110.60|1460.90|898.10|1571.50|14.60|0.00|314.30|1460.90|1475.50|1775.20|1789.80|562.80|
+2451535|77199|2451619|1046|81942|173188|4787|46833|81942|173188|4787|46833|58|8|10|5|193|79|56|35.44|81.51|51.35|1688.96|2875.60|1984.64|4564.56|172.53|0.00|365.12|2875.60|3048.13|3240.72|3413.25|890.96|
+2451535|77199|2451619|15106|81942|173188|4787|46833|81942|173188|4787|46833|1|14|18|3|38|79|91|42.84|80.53|33.82|4250.61|3077.62|3898.44|7328.23|75.70|553.97|2197.65|2523.65|2599.35|4721.30|4797.00|-1374.79|
+2451535|77199|2451627|5564|81942|173188|4787|46833|81942|173188|4787|46833|14|25|11|1|111|79|74|12.01|32.42|7.45|1847.78|551.30|888.74|2399.08|19.29|165.39|695.60|385.91|405.20|1081.51|1100.80|-502.83|
+2451535|77199|2451649|6667|81942|173188|4787|46833|81942|173188|4787|46833|16|19|8|4|57|79|79|42.16|86.00|4.30|6454.30|339.70|3330.64|6794.00|20.38|0.00|2649.66|339.70|360.08|2989.36|3009.74|-2990.94|
+2451535|77199|2451647|12658|81942|173188|4787|46833|81942|173188|4787|46833|8|19|5|5|55|79|35|20.50|38.33|18.39|697.90|643.65|717.50|1341.55|10.04|476.30|80.15|167.35|177.39|247.50|257.54|-550.15|
+2451535|77199|2451546|6452|81942|173188|4787|46833|81942|173188|4787|46833|28|14|1|4|77|79|55|50.17|110.37|86.08|1335.95|4734.40|2759.35|6070.35|426.09|0.00|1881.55|4734.40|5160.49|6615.95|7042.04|1975.05|
+2450945|18919|2451064|10981|35692|452928|1811|48647|19010|1843359|2849|21685|37|28|11|2|123|80|73|81.35|159.44|28.69|9544.75|2094.37|5938.55|11639.12|18.63|230.38|3491.59|1863.99|1882.62|5355.58|5374.21|-4074.56|
+2450945|18919|2450967|3040|35692|452928|1811|48647|19010|1843359|2849|21685|44|10|6|4|232|80|96|87.24|199.77|75.91|11890.56|7287.36|8375.04|19177.92|364.36|0.00|8246.40|7287.36|7651.72|15533.76|15898.12|-1087.68|
+2450945|18919|2451006|16549|35692|452928|1811|48647|19010|1843359|2849|21685|1|25|5|2|229|80|18|90.24|158.82|115.93|772.02|2086.74|1624.32|2858.76|83.46|0.00|514.44|2086.74|2170.20|2601.18|2684.64|462.42|
+2450945|18919|2450977|16306|35692|452928|1811|48647|19010|1843359|2849|21685|40|25|8|5|72|80|25|87.00|94.83|62.58|806.25|1564.50|2175.00|2370.75|62.58|0.00|237.00|1564.50|1627.08|1801.50|1864.08|-610.50|
+2450945|18919|2451004|7606|35692|452928|1811|48647|19010|1843359|2849|21685|14|2|6|3|127|80|79|31.76|58.43|32.72|2031.09|2584.88|2509.04|4615.97|180.94|0.00|1107.58|2584.88|2765.82|3692.46|3873.40|75.84|
+2450945|18919|2451060|17500|35692|452928|1811|48647|19010|1843359|2849|21685|50|7|15|1|194|80|90|11.50|29.09|11.34|1597.50|1020.60|1035.00|2618.10|81.64|0.00|314.10|1020.60|1102.24|1334.70|1416.34|-14.40|
+2450945|18919|2450982|8419|35692|452928|1811|48647|19010|1843359|2849|21685|55|25|18|3|215|80|30|91.66|210.81|8.43|6071.40|252.90|2749.80|6324.30|10.11|0.00|2466.30|252.90|263.01|2719.20|2729.31|-2496.90|
+2450945|18919|2451001|11504|35692|452928|1811|48647|19010|1843359|2849|21685|32|14|9|5|258|80|3|41.78|98.18|52.03|138.45|156.09|125.34|294.54|2.34|39.02|55.95|117.07|119.41|173.02|175.36|-8.27|
+2450945|18919|2450966|8362|35692|452928|1811|48647|19010|1843359|2849|21685|37|13|5|5|128|80|76|1.01|1.23|0.25|74.48|19.00|76.76|93.48|0.76|0.00|46.36|19.00|19.76|65.36|66.12|-57.76|
+2450945|18919|2450973|17227|35692|452928|1811|48647|19010|1843359|2849|21685|58|28|13|3|203|80|14|13.25|34.45|16.88|245.98|236.32|185.50|482.30|18.90|0.00|67.48|236.32|255.22|303.80|322.70|50.82|
+2451499|69985|2451532|14104|99295|784813|5531|20202|23185|261525|5556|26274|1|1|1|3|218|81|55|48.37|92.38|9.23|4573.25|507.65|2660.35|5080.90|34.11|20.30|0.00|487.35|521.46|487.35|521.46|-2173.00|
+2451499|69985|2451512|17002|99295|784813|5531|20202|23185|261525|5556|26274|7|14|5|2|126|81|100|76.39|176.46|155.28|2118.00|15528.00|7639.00|17646.00|621.12|0.00|2470.00|15528.00|16149.12|17998.00|18619.12|7889.00|
+2451499|69985|2451529|4855|99295|784813|5531|20202|23185|261525|5556|26274|8|4|11|2|170|81|31|29.79|51.23|32.27|587.76|1000.37|923.49|1588.13|70.02|0.00|476.16|1000.37|1070.39|1476.53|1546.55|76.88|
+2451499|69985|2451506|4369|99295|784813|5531|20202|23185|261525|5556|26274|4|7|18|4|60|81|59|88.07|226.33|43.00|10816.47|2537.00|5196.13|13353.47|0.00|0.00|2269.73|2537.00|2537.00|4806.73|4806.73|-2659.13|
+2451499|69985|2451549|16040|99295|784813|5531|20202|23185|261525|5556|26274|34|4|18|5|115|81|36|42.03|82.37|7.41|2698.56|266.76|1513.08|2965.32|18.67|0.00|504.00|266.76|285.43|770.76|789.43|-1246.32|
+2451499|69985|2451523|17599|99295|784813|5531|20202|23185|261525|5556|26274|50|14|20|5|277|81|7|48.19|143.60|63.18|562.94|442.26|337.33|1005.20|13.26|0.00|60.27|442.26|455.52|502.53|515.79|104.93|
+2451499|69985|2451603|10174|99295|784813|5531|20202|23185|261525|5556|26274|19|2|14|1|151|81|56|67.76|84.02|21.84|3482.08|1223.04|3794.56|4705.12|97.84|0.00|1505.28|1223.04|1320.88|2728.32|2826.16|-2571.52|
+2451499|69985|2451519|16180|99295|784813|5531|20202|23185|261525|5556|26274|44|28|17|4|111|81|8|32.81|95.80|45.98|398.56|367.84|262.48|766.40|22.07|0.00|383.20|367.84|389.91|751.04|773.11|105.36|
+2451499|69985|2451547|10978|99295|784813|5531|20202|23185|261525|5556|26274|16|22|5|3|28|81|22|6.87|12.84|1.54|248.60|33.88|151.14|282.48|1.01|0.00|104.50|33.88|34.89|138.38|139.39|-117.26|
+2451499|69985|2451588|10832|99295|784813|5531|20202|23185|261525|5556|26274|25|1|6|4|272|81|71|61.18|115.63|35.84|5665.09|2544.64|4343.78|8209.73|0.00|0.00|1231.14|2544.64|2544.64|3775.78|3775.78|-1799.14|
+2451499|69985|2451505|10066|99295|784813|5531|20202|23185|261525|5556|26274|10|19|17|4|181|81|62|22.20|48.84|33.21|969.06|2059.02|1376.40|3028.08|0.00|0.00|1392.52|2059.02|2059.02|3451.54|3451.54|682.62|
+2451499|69985|2451543|2539|99295|784813|5531|20202|23185|261525|5556|26274|55|13|6|5|34|81|18|28.74|57.76|46.78|197.64|842.04|517.32|1039.68|16.84|0.00|72.72|842.04|858.88|914.76|931.60|324.72|
+2451499|69985|2451572|17101|99295|784813|5531|20202|23185|261525|5556|26274|37|16|12|2|182|81|71|32.15|86.16|18.95|4771.91|1345.45|2282.65|6117.36|107.63|0.00|672.37|1345.45|1453.08|2017.82|2125.45|-937.20|
+2451499|69985|2451547|15265|99295|784813|5531|20202|23185|261525|5556|26274|14|19|12|4|129|81|97|82.95|181.66|167.12|1410.38|16210.64|8046.15|17621.02|1296.85|0.00|1584.98|16210.64|17507.49|17795.62|19092.47|8164.49|
+2452050|48249|2452132|11503|55611|301065|119|31126|37013|61520|2288|9695|1|13|13|2|168|82|84|75.43|136.52|135.15|115.08|11352.60|6336.12|11467.68|234.99|8741.50|1031.52|2611.10|2846.09|3642.62|3877.61|-3725.02|
+2452050|48249|2452147|5983|55611|301065|119|31126|37013|61520|2288|9695|35|11|17|3|39|82|75|88.75|118.03|86.16|2390.25|6462.00|6656.25|8852.25|323.10|0.00|3275.25|6462.00|6785.10|9737.25|10060.35|-194.25|
+2452050|48249|2452136|4559|55611|301065|119|31126|37013|61520|2288|9695|29|17|10|1|155|82|62|79.36|161.10|16.11|8989.38|998.82|4920.32|9988.20|59.92|0.00|2396.92|998.82|1058.74|3395.74|3455.66|-3921.50|
+2452050|48249|2452067|2393|55611|301065|119|31126|37013|61520|2288|9695|11|17|9|2|272|82|4|21.44|41.80|3.76|152.16|15.04|85.76|167.20|0.67|1.50|15.04|13.54|14.21|28.58|29.25|-72.22|
+2452050|48249|2452132|10725|55611|301065|119|31126|37013|61520|2288|9695|51|1|16|1|136|82|55|29.56|54.09|48.68|297.55|2677.40|1625.80|2974.95|26.77|0.00|178.20|2677.40|2704.17|2855.60|2882.37|1051.60|
+2452050|48249|2452161|14371|55611|301065|119|31126|37013|61520|2288|9695|55|3|9|1|233|82|71|11.34|30.16|25.33|342.93|1798.43|805.14|2141.36|143.87|0.00|770.35|1798.43|1942.30|2568.78|2712.65|993.29|
+2452050|48249|2452156|2003|55611|301065|119|31126|37013|61520|2288|9695|13|13|7|4|44|82|87|70.32|82.27|76.51|501.12|6656.37|6117.84|7157.49|532.50|0.00|286.23|6656.37|7188.87|6942.60|7475.10|538.53|
+2452050|48249|2452075|17851|55611|301065|119|31126|37013|61520|2288|9695|33|19|6|4|65|82|96|43.73|98.82|70.16|2751.36|6735.36|4198.08|9486.72|67.35|0.00|284.16|6735.36|6802.71|7019.52|7086.87|2537.28|
+2452050|48249|2452085|6651|55611|301065|119|31126|37013|61520|2288|9695|17|9|1|2|83|82|21|17.57|18.97|8.91|211.26|187.11|368.97|398.37|3.74|0.00|159.18|187.11|190.85|346.29|350.03|-181.86|
+2452050|48249|2452121|3715|55611|301065|119|31126|37013|61520|2288|9695|33|7|1|3|85|82|52|30.23|44.43|21.32|1201.72|1108.64|1571.96|2310.36|0.00|0.00|530.92|1108.64|1108.64|1639.56|1639.56|-463.32|
+2452050|48249|2452079|13589|55611|301065|119|31126|37013|61520|2288|9695|3|5|7|3|299|82|61|50.15|117.85|56.56|3738.69|3450.16|3059.15|7188.85|310.51|0.00|1725.08|3450.16|3760.67|5175.24|5485.75|391.01|
+2452339|12892|2452404|14904|2983|359033|683|37533|64757|1356958|4530|580|21|30|16|2|18|83|43|54.51|104.11|5.20|4253.13|223.60|2343.93|4476.73|0.00|0.00|402.48|223.60|223.60|626.08|626.08|-2120.33|
+2452339|12892|2452342|15805|2983|359033|683|37533|64757|1356958|4530|580|1|1|11|3|98|83|25|34.43|40.62|9.74|772.00|243.50|860.75|1015.50|19.48|0.00|111.50|243.50|262.98|355.00|374.48|-617.25|
+2452339|12892|2452430|12426|2983|359033|683|37533|64757|1356958|4530|580|36|7|17|3|269|83|54|56.58|92.22|9.22|4482.00|497.88|3055.32|4979.88|5.92|413.24|547.56|84.64|90.56|632.20|638.12|-2970.68|
+2452339|12892|2452428|6852|2983|359033|683|37533|64757|1356958|4530|580|55|9|9|3|70|83|54|84.72|166.05|146.12|1076.22|7890.48|4574.88|8966.70|0.00|0.00|3945.24|7890.48|7890.48|11835.72|11835.72|3315.60|
+2452339|12892|2452396|6093|2983|359033|683|37533|64757|1356958|4530|580|25|7|15|1|60|83|36|8.38|16.34|1.63|529.56|58.68|301.68|588.24|1.17|0.00|182.16|58.68|59.85|240.84|242.01|-243.00|
+2452339|12892|2452450|4315|2983|359033|683|37533|64757|1356958|4530|580|42|18|20|3|26|83|73|18.35|40.18|29.33|792.05|2141.09|1339.55|2933.14|85.64|0.00|1289.91|2141.09|2226.73|3431.00|3516.64|801.54|
+2452339|12892|2452391|5802|2983|359033|683|37533|64757|1356958|4530|580|7|1|5|4|186|83|97|49.55|111.98|6.71|10211.19|650.87|4806.35|10862.06|52.06|0.00|2280.47|650.87|702.93|2931.34|2983.40|-4155.48|
+2452339|12892|2452422|9583|2983|359033|683|37533|64757|1356958|4530|580|1|15|2|4|216|83|46|85.54|205.29|192.97|566.72|8876.62|3934.84|9443.34|56.81|8166.49|2455.02|710.13|766.94|3165.15|3221.96|-3224.71|
+2452339|12892|2452412|11775|2983|359033|683|37533|64757|1356958|4530|580|39|9|20|5|110|83|71|67.93|91.70|79.77|847.03|5663.67|4823.03|6510.70|0.00|0.00|1366.75|5663.67|5663.67|7030.42|7030.42|840.64|
+2452339|12892|2452445|17749|2983|359033|683|37533|64757|1356958|4530|580|49|1|10|1|252|83|58|72.32|90.40|5.42|4928.84|314.36|4194.56|5243.20|0.00|216.90|733.70|97.46|97.46|831.16|831.16|-4097.10|
+2452339|12892|2452411|1311|2983|359033|683|37533|64757|1356958|4530|580|43|15|17|3|289|83|82|86.28|258.84|10.35|20376.18|848.70|7074.96|21224.88|0.00|0.00|2546.92|848.70|848.70|3395.62|3395.62|-6226.26|
+2452339|12892|2452374|10374|2983|359033|683|37533|64757|1356958|4530|580|60|24|17|4|191|83|68|47.61|111.40|83.55|1893.80|5681.40|3237.48|7575.20|454.51|0.00|757.52|5681.40|6135.91|6438.92|6893.43|2443.92|
+2452339|12892|2452382|4753|2983|359033|683|37533|64757|1356958|4530|580|43|13|15|5|255|83|60|7.96|15.44|11.42|241.20|685.20|477.60|926.40|0.00|0.00|259.20|685.20|685.20|944.40|944.40|207.60|
+2452339|12892|2452453|9564|2983|359033|683|37533|64757|1356958|4530|580|39|12|12|3|204|83|25|24.87|51.97|42.61|234.00|1065.25|621.75|1299.25|95.87|0.00|38.75|1065.25|1161.12|1104.00|1199.87|443.50|
+2451121|45872|2451129|10498|36671|1428748|25|3702|22184|1122252|5580|10057|52|10|12|3|227|84|85|49.82|93.16|19.56|6256.00|1662.60|4234.70|7918.60|83.13|0.00|3959.30|1662.60|1745.73|5621.90|5705.03|-2572.10|
+2451121|45872|2451233|13384|36671|1428748|25|3702|22184|1122252|5580|10057|22|20|20|4|218|84|50|24.08|58.51|18.13|2019.00|906.50|1204.00|2925.50|63.45|0.00|1023.50|906.50|969.95|1930.00|1993.45|-297.50|
+2451121|45872|2451239|5200|36671|1428748|25|3702|22184|1122252|5580|10057|1|2|16|2|118|84|31|90.76|258.66|82.77|5452.59|2565.87|2813.56|8018.46|101.60|25.65|2405.29|2540.22|2641.82|4945.51|5047.11|-273.34|
+2451121|45872|2451137|5443|36671|1428748|25|3702|22184|1122252|5580|10057|19|8|10|2|117|84|24|40.12|52.95|48.18|114.48|1156.32|962.88|1270.80|46.25|0.00|152.40|1156.32|1202.57|1308.72|1354.97|193.44|
+2451121|45872|2451240|17638|36671|1428748|25|3702|22184|1122252|5580|10057|40|13|20|1|129|84|60|30.58|78.28|33.66|2677.20|2019.60|1834.80|4696.80|161.56|0.00|281.40|2019.60|2181.16|2301.00|2462.56|184.80|
+2451121|45872|2451122|8791|36671|1428748|25|3702|22184|1122252|5580|10057|14|22|19|1|93|84|94|55.83|122.26|61.13|5746.22|5746.22|5248.02|11492.44|459.69|0.00|1493.66|5746.22|6205.91|7239.88|7699.57|498.20|
+2451121|45872|2451163|13981|36671|1428748|25|3702|22184|1122252|5580|10057|38|28|10|2|277|84|98|29.69|35.33|25.08|1004.50|2457.84|2909.62|3462.34|49.15|0.00|830.06|2457.84|2506.99|3287.90|3337.05|-451.78|
+2451121|45872|2451188|16586|36671|1428748|25|3702|22184|1122252|5580|10057|55|20|7|3|156|84|37|17.19|27.84|14.75|484.33|545.75|636.03|1030.08|5.45|0.00|185.37|545.75|551.20|731.12|736.57|-90.28|
+2451121|45872|2451234|10358|36671|1428748|25|3702|22184|1122252|5580|10057|50|20|3|5|17|84|93|31.26|34.07|32.02|190.65|2977.86|2907.18|3168.51|59.55|0.00|728.19|2977.86|3037.41|3706.05|3765.60|70.68|
+2451121|45872|2451234|4436|36671|1428748|25|3702|22184|1122252|5580|10057|2|19|5|3|78|84|61|10.28|28.98|8.40|1255.38|512.40|627.08|1767.78|25.82|143.47|300.12|368.93|394.75|669.05|694.87|-258.15|
+2451121|45872|2451234|3698|36671|1428748|25|3702|22184|1122252|5580|10057|20|26|20|1|249|84|29|26.12|39.18|12.14|784.16|352.06|757.48|1136.22|17.60|0.00|420.21|352.06|369.66|772.27|789.87|-405.42|
+2452497|65461|2452533|3763|96461|1344599|737|5275|76678|1083332|6889|17027|27|25|15|4|266|85|27|54.20|115.98|92.78|626.40|2505.06|1463.40|3131.46|40.58|1828.69|406.89|676.37|716.95|1083.26|1123.84|-787.03|
+2452497|65461|2452557|2407|96461|1344599|737|5275|76678|1083332|6889|17027|60|30|20|5|180|85|29|15.04|29.92|15.55|416.73|450.95|436.16|867.68|31.56|0.00|407.74|450.95|482.51|858.69|890.25|14.79|
+2452497|65461|2452502|6225|96461|1344599|737|5275|76678|1083332|6889|17027|3|1|20|4|208|85|33|90.72|192.32|151.93|1332.87|5013.69|2993.76|6346.56|0.00|0.00|2855.82|5013.69|5013.69|7869.51|7869.51|2019.93|
+2452497|65461|2452558|6987|96461|1344599|737|5275|76678|1083332|6889|17027|9|19|15|5|62|85|91|3.87|7.23|4.04|290.29|367.64|352.17|657.93|33.08|0.00|203.84|367.64|400.72|571.48|604.56|15.47|
+2452497|65461|2452541|11436|96461|1344599|737|5275|76678|1083332|6889|17027|42|24|3|4|262|85|26|65.75|75.61|51.41|629.20|1336.66|1709.50|1965.86|80.19|0.00|963.04|1336.66|1416.85|2299.70|2379.89|-372.84|
+2452497|65461|2452510|7419|96461|1344599|737|5275|76678|1083332|6889|17027|36|18|7|3|26|85|9|70.40|119.68|119.68|0.00|1077.12|633.60|1077.12|64.62|0.00|387.72|1077.12|1141.74|1464.84|1529.46|443.52|
+2452497|65461|2452546|9234|96461|1344599|737|5275|76678|1083332|6889|17027|9|21|20|1|52|85|76|23.72|36.29|29.75|497.04|2261.00|1802.72|2758.04|158.27|0.00|1241.08|2261.00|2419.27|3502.08|3660.35|458.28|
+2452497|65461|2452522|13327|96461|1344599|737|5275|76678|1083332|6889|17027|24|7|16|3|130|85|57|62.93|173.68|145.89|1584.03|8315.73|3587.01|9899.76|131.38|1746.30|4850.70|6569.43|6700.81|11420.13|11551.51|2982.42|
+2452497|65461|2452565|5881|96461|1344599|737|5275|76678|1083332|6889|17027|25|21|9|5|208|85|35|85.68|231.33|106.41|4372.20|3724.35|2998.80|8096.55|37.24|0.00|1052.45|3724.35|3761.59|4776.80|4814.04|725.55|
+2452497|65461|2452504|3465|96461|1344599|737|5275|76678|1083332|6889|17027|15|18|20|3|109|85|7|16.15|44.41|18.20|183.47|127.40|113.05|310.87|10.19|0.00|6.16|127.40|137.59|133.56|143.75|14.35|
+2452497|65461|2452504|7290|96461|1344599|737|5275|76678|1083332|6889|17027|60|24|17|4|11|85|69|98.55|111.36|16.70|6531.54|1152.30|6799.95|7683.84|11.52|0.00|2151.42|1152.30|1163.82|3303.72|3315.24|-5647.65|
+2452497|65461|2452534|9801|96461|1344599|737|5275|76678|1083332|6889|17027|13|19|13|1|19|85|58|78.73|187.37|67.45|6955.36|3912.10|4566.34|10867.46|195.60|0.00|3477.10|3912.10|4107.70|7389.20|7584.80|-654.24|
+2452497|65461|2452507|7959|96461|1344599|737|5275|76678|1083332|6889|17027|21|7|10|1|149|85|45|35.10|74.06|46.65|1233.45|2099.25|1579.50|3332.70|0.00|1196.57|1133.10|902.68|902.68|2035.78|2035.78|-676.82|
+2452497|65461|2452581|4401|96461|1344599|737|5275|76678|1083332|6889|17027|7|30|19|3|237|85|53|75.46|141.11|118.53|1196.74|6282.09|3999.38|7478.83|56.53|4397.46|3514.96|1884.63|1941.16|5399.59|5456.12|-2114.75|
+2451408|62038|2451502|3448|21094|1129773|5299|42378|21094|1129773|5299|42378|55|14|8|5|285|86|25|72.25|137.99|91.07|1173.00|2276.75|1806.25|3449.75|85.37|569.18|1724.75|1707.57|1792.94|3432.32|3517.69|-98.68|
+2451408|62038|2451491|15367|21094|1129773|5299|42378|21094|1129773|5299|42378|55|13|13|2|108|86|29|83.60|181.41|14.51|4840.10|420.79|2424.40|5260.89|37.87|0.00|1052.12|420.79|458.66|1472.91|1510.78|-2003.61|
+2451408|62038|2451525|16273|21094|1129773|5299|42378|21094|1129773|5299|42378|52|16|1|3|15|86|98|16.66|17.65|3.53|1383.76|345.94|1632.68|1729.70|0.00|0.00|414.54|345.94|345.94|760.48|760.48|-1286.74|
+2451408|62038|2451427|3241|21094|1129773|5299|42378|21094|1129773|5299|42378|58|2|4|1|248|86|4|79.84|216.36|205.54|43.28|822.16|319.36|865.44|49.32|0.00|86.52|822.16|871.48|908.68|958.00|502.80|
+2451408|62038|2451502|8605|21094|1129773|5299|42378|21094|1129773|5299|42378|38|1|9|2|25|86|86|83.21|232.15|71.96|13776.34|6188.56|7156.06|19964.90|183.80|4146.33|6587.60|2042.23|2226.03|8629.83|8813.63|-5113.83|
+2451408|62038|2451461|3092|21094|1129773|5299|42378|21094|1129773|5299|42378|43|22|12|5|185|86|41|80.96|235.59|179.04|2318.55|7340.64|3319.36|9659.19|293.62|0.00|4443.17|7340.64|7634.26|11783.81|12077.43|4021.28|
+2451408|62038|2451443|8834|21094|1129773|5299|42378|21094|1129773|5299|42378|43|14|14|5|116|86|53|56.87|79.04|32.40|2471.92|1717.20|3014.11|4189.12|137.37|0.00|1633.46|1717.20|1854.57|3350.66|3488.03|-1296.91|
+2451408|62038|2451466|17228|21094|1129773|5299|42378|21094|1129773|5299|42378|10|26|5|3|234|86|76|37.99|94.59|33.10|4673.24|2515.60|2887.24|7188.84|50.31|0.00|430.92|2515.60|2565.91|2946.52|2996.83|-371.64|
+2451408|62038|2451477|9988|21094|1129773|5299|42378|21094|1129773|5299|42378|55|20|4|5|163|86|30|90.13|228.93|169.40|1785.90|5082.00|2703.90|6867.90|204.29|1677.06|3090.30|3404.94|3609.23|6495.24|6699.53|701.04|
+2451408|62038|2451453|16633|21094|1129773|5299|42378|21094|1129773|5299|42378|40|14|15|2|239|86|92|29.54|76.50|35.95|3730.60|3307.40|2717.68|7038.00|297.66|0.00|1336.76|3307.40|3605.06|4644.16|4941.82|589.72|
+2451408|62038|2451497|16057|21094|1129773|5299|42378|21094|1129773|5299|42378|1|4|2|4|293|86|33|76.59|229.77|156.24|2426.49|5155.92|2527.47|7582.41|257.79|0.00|1743.72|5155.92|5413.71|6899.64|7157.43|2628.45|
+2451408|62038|2451437|13754|21094|1129773|5299|42378|21094|1129773|5299|42378|20|10|1|4|104|86|19|73.94|158.23|129.74|541.31|2465.06|1404.86|3006.37|98.60|0.00|691.41|2465.06|2563.66|3156.47|3255.07|1060.20|
+2451408|62038|2451436|14476|21094|1129773|5299|42378|21094|1129773|5299|42378|13|13|20|5|30|86|71|5.85|10.53|10.10|30.53|717.10|415.35|747.63|50.19|0.00|246.37|717.10|767.29|963.47|1013.66|301.75|
+2452050|17840|2452089|927|49208|175411|6414|25017|84577|1855388|1626|43076|57|19|11|2|283|87|46|42.65|46.48|42.29|192.74|1945.34|1961.90|2138.08|77.81|0.00|277.84|1945.34|2023.15|2223.18|2300.99|-16.56|
+2452050|17840|2452120|8415|49208|175411|6414|25017|84577|1855388|1626|43076|25|27|15|1|257|87|11|43.07|80.11|13.61|731.50|149.71|473.77|881.21|0.00|0.00|255.53|149.71|149.71|405.24|405.24|-324.06|
+2452050|17840|2452147|12727|49208|175411|6414|25017|84577|1855388|1626|43076|35|23|9|1|220|87|52|89.77|266.61|218.62|2495.48|11368.24|4668.04|13863.72|0.00|6366.21|6377.28|5002.03|5002.03|11379.31|11379.31|333.99|
+2452050|17840|2452153|2453|49208|175411|6414|25017|84577|1855388|1626|43076|53|17|8|5|52|87|92|75.61|124.00|58.28|6046.24|5361.76|6956.12|11408.00|53.61|0.00|4791.36|5361.76|5415.37|10153.12|10206.73|-1594.36|
+2452050|17840|2452125|11585|49208|175411|6414|25017|84577|1855388|1626|43076|29|5|11|4|177|87|16|87.55|229.38|135.33|1504.80|2165.28|1400.80|3670.08|108.26|0.00|403.68|2165.28|2273.54|2568.96|2677.22|764.48|
+2452050|17840|2452097|15117|49208|175411|6414|25017|84577|1855388|1626|43076|29|29|15|2|142|87|61|91.13|236.93|144.52|5637.01|8815.72|5558.93|14452.73|88.15|0.00|4624.41|8815.72|8903.87|13440.13|13528.28|3256.79|
+2452050|17840|2452154|7105|49208|175411|6414|25017|84577|1855388|1626|43076|31|7|5|3|44|87|78|86.00|253.70|81.18|13456.56|6332.04|6708.00|19788.60|379.92|0.00|1978.86|6332.04|6711.96|8310.90|8690.82|-375.96|
+2452050|17840|2452138|3059|49208|175411|6414|25017|84577|1855388|1626|43076|25|5|6|1|19|87|30|42.68|84.93|23.78|1834.50|713.40|1280.40|2547.90|6.06|592.12|942.60|121.28|127.34|1063.88|1069.94|-1159.12|
+2452050|17840|2452066|13933|49208|175411|6414|25017|84577|1855388|1626|43076|39|13|14|1|118|87|89|91.71|104.54|22.99|7257.95|2046.11|8162.19|9304.06|102.30|0.00|1953.55|2046.11|2148.41|3999.66|4101.96|-6116.08|
+2452050|17840|2452074|13271|49208|175411|6414|25017|84577|1855388|1626|43076|23|3|19|4|46|87|63|72.87|197.47|195.49|124.74|12315.87|4590.81|12440.61|0.00|2955.80|1741.32|9360.07|9360.07|11101.39|11101.39|4769.26|
+2452050|17840|2452072|3371|49208|175411|6414|25017|84577|1855388|1626|43076|5|3|11|2|96|87|11|94.40|221.84|179.69|463.65|1976.59|1038.40|2440.24|28.46|553.44|365.97|1423.15|1451.61|1789.12|1817.58|384.75|
+2452050|17840|2452122|9725|49208|175411|6414|25017|84577|1855388|1626|43076|13|5|6|2|95|87|27|69.30|120.58|13.26|2897.64|358.02|1871.10|3255.66|21.48|0.00|911.52|358.02|379.50|1269.54|1291.02|-1513.08|
+2452050|17840|2452103|745|49208|175411|6414|25017|84577|1855388|1626|43076|13|9|6|2|8|87|67|61.18|176.81|104.31|4857.50|6988.77|4099.06|11846.27|628.98|0.00|1065.97|6988.77|7617.75|8054.74|8683.72|2889.71|
+2452050|17840|2452070|31|49208|175411|6414|25017|84577|1855388|1626|43076|13|3|7|2|197|87|53|65.33|163.32|97.99|3462.49|5193.47|3462.49|8655.96|207.73|0.00|2510.08|5193.47|5401.20|7703.55|7911.28|1730.98|
+2451375|59503|2451468|9092|74942|1114698|854|36610|76788|822852|3078|18834|50|20|6|1|266|88|54|94.76|276.69|146.64|7022.70|7918.56|5117.04|14941.26|0.00|0.00|5378.40|7918.56|7918.56|13296.96|13296.96|2801.52|
+2451375|59503|2451407|14851|74942|1114698|854|36610|76788|822852|3078|18834|34|8|19|3|167|88|4|21.28|35.32|27.54|31.12|110.16|85.12|141.28|8.81|0.00|66.40|110.16|118.97|176.56|185.37|25.04|
+2451375|59503|2451400|373|74942|1114698|854|36610|76788|822852|3078|18834|4|7|5|3|164|88|89|6.60|10.95|6.35|409.40|565.15|587.40|974.55|45.21|0.00|136.17|565.15|610.36|701.32|746.53|-22.25|
+2451375|59503|2451401|6680|74942|1114698|854|36610|76788|822852|3078|18834|49|10|17|1|92|88|1|93.73|269.00|166.78|102.22|166.78|93.73|269.00|6.67|0.00|26.90|166.78|173.45|193.68|200.35|73.05|
+2451375|59503|2451418|16792|74942|1114698|854|36610|76788|822852|3078|18834|20|2|3|1|222|88|93|42.11|82.53|48.69|3147.12|4528.17|3916.23|7675.29|181.12|0.00|3837.18|4528.17|4709.29|8365.35|8546.47|611.94|
+2451375|59503|2451395|12230|74942|1114698|854|36610|76788|822852|3078|18834|55|26|7|3|39|88|3|38.33|110.00|23.10|260.70|69.30|114.99|330.00|2.07|0.00|102.30|69.30|71.37|171.60|173.67|-45.69|
+2451375|59503|2451485|7939|74942|1114698|854|36610|76788|822852|3078|18834|32|13|4|4|284|88|71|17.52|26.63|5.59|1493.84|396.89|1243.92|1890.73|35.72|0.00|510.49|396.89|432.61|907.38|943.10|-847.03|
+2451375|59503|2451462|692|74942|1114698|854|36610|76788|822852|3078|18834|55|16|3|2|51|88|6|62.28|66.63|18.65|287.88|111.90|373.68|399.78|5.59|0.00|119.88|111.90|117.49|231.78|237.37|-261.78|
+2451375|59503|2451490|16327|74942|1114698|854|36610|76788|822852|3078|18834|13|19|20|3|93|88|66|40.39|64.22|37.88|1738.44|2500.08|2665.74|4238.52|150.00|0.00|1143.78|2500.08|2650.08|3643.86|3793.86|-165.66|
+2451375|59503|2451459|871|74942|1114698|854|36610|76788|822852|3078|18834|14|8|2|4|93|88|56|24.16|55.08|31.94|1295.84|1788.64|1352.96|3084.48|16.63|125.20|554.96|1663.44|1680.07|2218.40|2235.03|310.48|
+2451375|59503|2451457|9541|74942|1114698|854|36610|76788|822852|3078|18834|22|8|18|1|44|88|82|62.62|120.85|93.05|2279.60|7630.10|5134.84|9909.70|534.10|0.00|594.50|7630.10|8164.20|8224.60|8758.70|2495.26|
+2451513|55518|2451541|17686|91083|39218|4953|48288|47292|749108|5493|41228|44|26|18|4|123|89|47|90.20|207.46|188.78|877.96|8872.66|4239.40|9750.62|377.97|2573.07|4582.50|6299.59|6677.56|10882.09|11260.06|2060.19|
+2451513|55518|2451530|9146|91083|39218|4953|48288|47292|749108|5493|41228|1|7|7|5|193|89|57|59.85|96.95|22.29|4255.62|1270.53|3411.45|5526.15|76.23|0.00|2265.18|1270.53|1346.76|3535.71|3611.94|-2140.92|
+2451513|55518|2451523|2116|91083|39218|4953|48288|47292|749108|5493|41228|32|16|4|4|114|89|11|3.04|7.90|4.10|41.80|45.10|33.44|86.90|2.70|0.00|41.69|45.10|47.80|86.79|89.49|11.66|
+2451513|55518|2451515|6578|91083|39218|4953|48288|47292|749108|5493|41228|58|13|17|2|284|89|55|29.43|36.78|3.31|1840.85|182.05|1618.65|2022.90|1.82|0.00|485.10|182.05|183.87|667.15|668.97|-1436.60|
+2451513|55518|2451577|14935|91083|39218|4953|48288|47292|749108|5493|41228|34|13|9|1|163|89|16|32.31|73.98|71.02|47.36|1136.32|516.96|1183.68|79.54|0.00|165.60|1136.32|1215.86|1301.92|1381.46|619.36|
+2451513|55518|2451570|3484|91083|39218|4953|48288|47292|749108|5493|41228|16|22|2|5|65|89|59|93.42|184.03|90.17|5537.74|5320.03|5511.78|10857.77|266.00|0.00|4017.31|5320.03|5586.03|9337.34|9603.34|-191.75|
+2451513|55518|2451624|2138|91083|39218|4953|48288|47292|749108|5493|41228|10|19|1|5|216|89|26|19.61|20.98|19.51|38.22|507.26|509.86|545.48|0.00|0.00|223.60|507.26|507.26|730.86|730.86|-2.60|
+2451513|55518|2451597|14116|91083|39218|4953|48288|47292|749108|5493|41228|19|4|3|3|153|89|46|77.46|182.03|40.04|6531.54|1841.84|3563.16|8373.38|55.25|0.00|2176.72|1841.84|1897.09|4018.56|4073.81|-1721.32|
+2451513|55518|2451599|2749|91083|39218|4953|48288|47292|749108|5493|41228|50|8|14|3|62|89|53|94.41|276.62|47.02|12168.80|2492.06|5003.73|14660.86|224.28|0.00|5717.64|2492.06|2716.34|8209.70|8433.98|-2511.67|
+2451513|55518|2451592|5080|91083|39218|4953|48288|47292|749108|5493|41228|58|22|7|2|154|89|62|69.83|83.79|3.35|4987.28|207.70|4329.46|5194.98|16.61|0.00|0.00|207.70|224.31|207.70|224.31|-4121.76|
+2451513|55518|2451593|6814|91083|39218|4953|48288|47292|749108|5493|41228|55|4|11|1|274|89|87|69.06|158.14|23.72|11694.54|2063.64|6008.22|13758.18|5.57|2001.73|6328.38|61.91|67.48|6390.29|6395.86|-5946.31|
+2452274|6180|2452357|1873|87541|1076044|6500|12956|437|1493380|1124|1980|15|29|9|1|50|90|63|53.88|120.15|72.09|3027.78|4541.67|3394.44|7569.45|181.66|0.00|3708.81|4541.67|4723.33|8250.48|8432.14|1147.23|
+2452274|6180|2452339|8519|87541|1076044|6500|12956|437|1493380|1124|1980|55|7|3|4|255|90|14|96.62|185.51|87.18|1376.62|1220.52|1352.68|2597.14|36.61|0.00|519.40|1220.52|1257.13|1739.92|1776.53|-132.16|
+2452274|6180|2452391|7679|87541|1076044|6500|12956|437|1493380|1124|1980|33|7|1|3|205|90|70|76.07|126.27|1.26|8750.70|88.20|5324.90|8838.90|1.76|0.00|795.20|88.20|89.96|883.40|885.16|-5236.70|
+2452274|6180|2452311|1597|87541|1076044|6500|12956|437|1493380|1124|1980|55|13|6|5|124|90|48|97.38|120.75|109.88|521.76|5274.24|4674.24|5796.00|0.00|0.00|2782.08|5274.24|5274.24|8056.32|8056.32|600.00|
+2452274|6180|2452318|16025|87541|1076044|6500|12956|437|1493380|1124|1980|1|3|3|4|222|90|7|83.36|240.91|197.54|303.59|1382.78|583.52|1686.37|69.13|0.00|522.76|1382.78|1451.91|1905.54|1974.67|799.26|
+2452274|6180|2452290|8603|87541|1076044|6500|12956|437|1493380|1124|1980|53|29|16|4|55|90|43|21.04|49.65|33.76|683.27|1451.68|904.72|2134.95|58.06|0.00|191.78|1451.68|1509.74|1643.46|1701.52|546.96|
+2452274|6180|2452360|7791|87541|1076044|6500|12956|437|1493380|1124|1980|55|23|3|4|278|90|98|82.93|208.98|73.14|13312.32|7167.72|8127.14|20480.04|116.11|3297.15|5733.98|3870.57|3986.68|9604.55|9720.66|-4256.57|
+2452274|6180|2452329|14505|87541|1076044|6500|12956|437|1493380|1124|1980|37|5|20|2|204|90|39|7.66|13.09|1.83|439.14|71.37|298.74|510.51|3.56|0.00|45.63|71.37|74.93|117.00|120.56|-227.37|
+2452274|6180|2452368|4713|87541|1076044|6500|12956|437|1493380|1124|1980|57|25|4|3|62|90|7|17.68|33.76|33.42|2.38|233.94|123.76|236.32|0.00|0.00|75.60|233.94|233.94|309.54|309.54|110.18|
+2452274|6180|2452356|1083|87541|1076044|6500|12956|437|1493380|1124|1980|3|11|17|1|145|90|73|26.38|36.40|15.65|1514.75|1142.45|1925.74|2657.20|62.37|102.82|690.58|1039.63|1102.00|1730.21|1792.58|-886.11|
+2452274|6180|2452323|9071|87541|1076044|6500|12956|437|1493380|1124|1980|45|7|2|3|164|90|36|11.07|13.39|2.81|380.88|101.16|398.52|482.04|1.70|79.91|0.00|21.25|22.95|21.25|22.95|-377.27|
+2452274|6180|2452315|1277|87541|1076044|6500|12956|437|1493380|1124|1980|47|21|11|3|145|90|42|78.09|102.29|81.83|859.32|3436.86|3279.78|4296.18|60.83|1409.11|1116.78|2027.75|2088.58|3144.53|3205.36|-1252.03|
+2451398|35797|2451462|3367|90554|1506619|2684|23149|24539|640349|3297|32962|14|1|14|2|280|91|70|27.11|46.90|26.73|1411.90|1871.10|1897.70|3283.00|74.84|0.00|1181.60|1871.10|1945.94|3052.70|3127.54|-26.60|
+2451398|35797|2451498|2122|90554|1506619|2684|23149|24539|640349|3297|32962|43|8|1|4|97|91|87|95.95|253.30|141.84|9697.02|12340.08|8347.65|22037.10|123.40|0.00|5728.95|12340.08|12463.48|18069.03|18192.43|3992.43|
+2451398|35797|2451505|5426|90554|1506619|2684|23149|24539|640349|3297|32962|7|14|2|1|170|91|1|13.57|26.19|2.35|23.84|2.35|13.57|26.19|0.16|0.00|3.66|2.35|2.51|6.01|6.17|-11.22|
+2451398|35797|2451435|7460|90554|1506619|2684|23149|24539|640349|3297|32962|31|13|8|4|123|91|39|92.49|94.33|66.03|1103.70|2575.17|3607.11|3678.87|231.76|0.00|36.66|2575.17|2806.93|2611.83|2843.59|-1031.94|
+2451398|35797|2451478|8120|90554|1506619|2684|23149|24539|640349|3297|32962|25|13|1|5|109|91|80|35.51|100.49|42.20|4663.20|3376.00|2840.80|8039.20|135.04|0.00|2089.60|3376.00|3511.04|5465.60|5600.64|535.20|
+2451398|35797|2451501|1906|90554|1506619|2684|23149|24539|640349|3297|32962|20|8|6|5|84|91|52|17.40|48.54|43.20|277.68|2246.40|904.80|2524.08|112.32|0.00|1034.80|2246.40|2358.72|3281.20|3393.52|1341.60|
+2451398|35797|2451429|10436|90554|1506619|2684|23149|24539|640349|3297|32962|19|7|11|5|129|91|47|57.26|142.57|76.98|3082.73|3618.06|2691.22|6700.79|36.18|0.00|2278.09|3618.06|3654.24|5896.15|5932.33|926.84|
+2451398|35797|2451509|5974|90554|1506619|2684|23149|24539|640349|3297|32962|10|19|1|5|269|91|75|3.81|6.47|5.17|97.50|387.75|285.75|485.25|2.94|314.07|130.50|73.68|76.62|204.18|207.12|-212.07|
+2452210|33897|2452267|8255|73296|1855583|5070|8110|76380|1396321|524|48764|39|15|6|4|62|92|83|81.21|176.22|77.53|8191.27|6434.99|6740.43|14626.26|450.44|0.00|1169.47|6434.99|6885.43|7604.46|8054.90|-305.44|
+2452210|33897|2452230|5221|73296|1855583|5070|8110|76380|1396321|524|48764|47|17|17|5|273|92|76|96.40|188.94|52.90|10339.04|4020.40|7326.40|14359.44|241.22|0.00|1292.00|4020.40|4261.62|5312.40|5553.62|-3306.00|
+2452210|33897|2452273|7749|73296|1855583|5070|8110|76380|1396321|524|48764|23|29|18|1|77|92|38|16.92|36.20|3.62|1238.04|137.56|642.96|1375.60|4.12|0.00|632.70|137.56|141.68|770.26|774.38|-505.40|
+2452210|33897|2452273|16029|73296|1855583|5070|8110|76380|1396321|524|48764|25|9|7|4|251|92|42|87.38|187.86|24.42|6864.48|1025.64|3669.96|7890.12|30.76|0.00|3234.84|1025.64|1056.40|4260.48|4291.24|-2644.32|
+2452210|33897|2452289|17041|73296|1855583|5070|8110|76380|1396321|524|48764|21|25|15|5|72|92|91|1.53|1.63|1.51|10.92|137.41|139.23|148.33|4.12|0.00|38.22|137.41|141.53|175.63|179.75|-1.82|
+2452210|33897|2452304|3193|73296|1855583|5070|8110|76380|1396321|524|48764|43|11|17|1|272|92|9|50.87|108.35|31.42|692.37|282.78|457.83|975.15|5.65|0.00|458.28|282.78|288.43|741.06|746.71|-175.05|
+2452210|33897|2452327|2151|73296|1855583|5070|8110|76380|1396321|524|48764|29|27|20|4|168|92|91|48.95|127.75|16.60|10114.65|1510.60|4454.45|11625.25|45.31|0.00|930.02|1510.60|1555.91|2440.62|2485.93|-2943.85|
+2452210|33897|2452256|17951|73296|1855583|5070|8110|76380|1396321|524|48764|27|15|2|3|177|92|37|45.72|67.66|6.76|2253.30|250.12|1691.64|2503.42|20.00|0.00|1101.49|250.12|270.12|1351.61|1371.61|-1441.52|
+2452210|33897|2452328|9885|73296|1855583|5070|8110|76380|1396321|524|48764|33|21|2|3|67|92|46|67.06|80.47|72.42|370.30|3331.32|3084.76|3701.62|113.93|2065.41|407.10|1265.91|1379.84|1673.01|1786.94|-1818.85|
+2452601|71061|2452606|1957|11894|1711901|355|23562|70838|1051564|5630|44884|39|19|3|3|296|93|21|93.55|198.32|71.39|2665.53|1499.19|1964.55|4164.72|0.00|0.00|1041.18|1499.19|1499.19|2540.37|2540.37|-465.36|
+2452601|71061|2452610|2727|11894|1711901|355|23562|70838|1051564|5630|44884|15|19|12|4|219|93|46|21.36|33.74|29.69|186.30|1365.74|982.56|1552.04|13.65|0.00|713.92|1365.74|1379.39|2079.66|2093.31|383.18|
+2452601|71061|2452666|17401|11894|1711901|355|23562|70838|1051564|5630|44884|12|19|1|5|59|93|9|28.50|61.56|59.71|16.65|537.39|256.50|554.04|48.36|0.00|160.65|537.39|585.75|698.04|746.40|280.89|
+2452601|71061|2452672|7489|11894|1711901|355|23562|70838|1051564|5630|44884|49|13|9|3|230|93|46|88.52|185.89|76.21|5045.28|3505.66|4071.92|8550.94|280.45|0.00|4104.12|3505.66|3786.11|7609.78|7890.23|-566.26|
+2452601|71061|2452624|11707|11894|1711901|355|23562|70838|1051564|5630|44884|55|7|17|3|156|93|72|1.22|3.41|2.01|100.80|144.72|87.84|245.52|8.68|0.00|19.44|144.72|153.40|164.16|172.84|56.88|
+2452601|71061|2452617|1410|11894|1711901|355|23562|70838|1051564|5630|44884|57|1|13|5|90|93|100|82.83|223.64|187.85|3579.00|18785.00|8283.00|22364.00|1690.65|0.00|6038.00|18785.00|20475.65|24823.00|26513.65|10502.00|
+2452601|71061|2452607|9013|11894|1711901|355|23562|70838|1051564|5630|44884|33|19|3|3|146|93|30|4.82|11.03|4.30|201.90|129.00|144.60|330.90|0.46|105.78|138.90|23.22|23.68|162.12|162.58|-121.38|
+2452601|71061|2452655|9663|11894|1711901|355|23562|70838|1051564|5630|44884|42|9|9|3|77|93|4|84.68|186.29|50.29|544.00|201.16|338.72|745.16|15.61|6.03|312.96|195.13|210.74|508.09|523.70|-143.59|
+2452601|71061|2452659|6690|11894|1711901|355|23562|70838|1051564|5630|44884|45|19|17|1|188|93|9|24.71|44.97|39.12|52.65|352.08|222.39|404.73|0.00|0.00|89.01|352.08|352.08|441.09|441.09|129.69|
+2452601|71061|2452679|753|11894|1711901|355|23562|70838|1051564|5630|44884|12|27|2|2|255|93|51|84.32|145.03|140.67|222.36|7174.17|4300.32|7396.53|215.22|0.00|2292.45|7174.17|7389.39|9466.62|9681.84|2873.85|
+2452601|71061|2452699|5985|11894|1711901|355|23562|70838|1051564|5630|44884|15|3|6|5|44|93|73|96.45|236.30|134.69|7417.53|9832.37|7040.85|17249.90|884.91|0.00|5174.97|9832.37|10717.28|15007.34|15892.25|2791.52|
+2452601|71061|2452665|3759|11894|1711901|355|23562|70838|1051564|5630|44884|30|3|9|1|150|93|55|2.20|2.70|0.29|132.55|15.95|121.00|148.50|0.47|0.00|38.50|15.95|16.42|54.45|54.92|-105.05|
+2452601|71061|2452711|13681|11894|1711901|355|23562|70838|1051564|5630|44884|39|15|6|4|212|93|17|51.89|91.84|62.45|499.63|1061.65|882.13|1561.28|10.61|0.00|733.72|1061.65|1072.26|1795.37|1805.98|179.52|
+2452601|71061|2452690|13597|11894|1711901|355|23562|70838|1051564|5630|44884|27|6|19|3|254|93|43|66.20|114.52|91.61|985.13|3939.23|2846.60|4924.36|236.35|0.00|1428.03|3939.23|4175.58|5367.26|5603.61|1092.63|
+2452601|71061|2452669|7113|11894|1711901|355|23562|70838|1051564|5630|44884|51|9|7|4|288|93|93|99.73|268.27|131.45|12724.26|12224.85|9274.89|24949.11|122.24|0.00|11725.44|12224.85|12347.09|23950.29|24072.53|2949.96|
+2451033|74433|2451050|625|89077|352774|5271|21768|60926|1006342|5659|15356|43|20|3|2|264|94|87|47.91|52.70|2.63|4356.09|228.81|4168.17|4584.90|6.86|0.00|182.70|228.81|235.67|411.51|418.37|-3939.36|
+2451033|74433|2451146|4345|89077|352774|5271|21768|60926|1006342|5659|15356|20|1|1|3|38|94|90|20.37|56.42|21.43|3149.10|1928.70|1833.30|5077.80|57.86|0.00|1269.00|1928.70|1986.56|3197.70|3255.56|95.40|
+2451033|74433|2451129|4723|89077|352774|5271|21768|60926|1006342|5659|15356|46|2|20|5|63|94|4|75.98|103.33|43.39|239.76|173.56|303.92|413.32|12.14|0.00|136.36|173.56|185.70|309.92|322.06|-130.36|
+2451033|74433|2451047|16598|89077|352774|5271|21768|60926|1006342|5659|15356|44|20|9|5|268|94|84|13.80|14.49|13.62|73.08|1144.08|1159.20|1217.16|91.52|0.00|486.36|1144.08|1235.60|1630.44|1721.96|-15.12|
+2451033|74433|2451090|8126|89077|352774|5271|21768|60926|1006342|5659|15356|31|8|17|1|265|94|6|49.55|65.90|30.97|209.58|185.82|297.30|395.40|7.43|0.00|86.94|185.82|193.25|272.76|280.19|-111.48|
+2451033|74433|2451043|9208|89077|352774|5271|21768|60926|1006342|5659|15356|1|19|19|5|74|94|25|78.43|104.31|84.49|495.50|2112.25|1960.75|2607.75|84.49|0.00|651.75|2112.25|2196.74|2764.00|2848.49|151.50|
+2451033|74433|2451118|16892|89077|352774|5271|21768|60926|1006342|5659|15356|37|2|15|3|138|94|46|2.52|4.53|2.49|93.84|114.54|115.92|208.38|2.29|0.00|85.10|114.54|116.83|199.64|201.93|-1.38|
+2451033|74433|2451116|11698|89077|352774|5271|21768|60926|1006342|5659|15356|32|22|17|2|237|94|34|55.77|72.50|29.72|1454.52|1010.48|1896.18|2465.00|10.10|0.00|468.18|1010.48|1020.58|1478.66|1488.76|-885.70|
+2451033|74433|2451048|12202|89077|352774|5271|21768|60926|1006342|5659|15356|22|20|10|4|248|94|27|53.16|141.40|25.45|3130.65|687.15|1435.32|3817.80|61.84|0.00|381.78|687.15|748.99|1068.93|1130.77|-748.17|
+2451033|74433|2451122|12794|89077|352774|5271|21768|60926|1006342|5659|15356|8|10|4|2|150|94|40|42.56|73.62|33.86|1590.40|1354.40|1702.40|2944.80|0.00|0.00|147.20|1354.40|1354.40|1501.60|1501.60|-348.00|
+2451994|28063|2452030|11963|66345|115505|4218|9378|74719|1163832|2011|27971|21|9|19|5|264|95|85|25.61|58.39|49.04|794.75|4168.40|2176.85|4963.15|375.15|0.00|545.70|4168.40|4543.55|4714.10|5089.25|1991.55|
+2451994|28063|2452080|7785|66345|115505|4218|9378|74719|1163832|2011|27971|59|29|3|1|39|95|51|47.21|66.09|5.94|3067.65|302.94|2407.71|3370.59|0.00|0.00|808.86|302.94|302.94|1111.80|1111.80|-2104.77|
+2451994|28063|2452007|423|66345|115505|4218|9378|74719|1163832|2011|27971|19|5|16|4|162|95|52|80.50|212.52|89.25|6410.04|4641.00|4186.00|11051.04|232.05|0.00|5525.52|4641.00|4873.05|10166.52|10398.57|455.00|
+2451994|28063|2451998|9279|66345|115505|4218|9378|74719|1163832|2011|27971|3|3|19|4|76|95|10|90.41|256.76|233.65|231.10|2336.50|904.10|2567.60|130.84|467.30|641.90|1869.20|2000.04|2511.10|2641.94|965.10|
+2451994|28063|2452028|4647|66345|115505|4218|9378|74719|1163832|2011|27971|19|29|20|5|203|95|91|91.05|249.47|54.88|17707.69|4994.08|8285.55|22701.77|0.00|0.00|9307.48|4994.08|4994.08|14301.56|14301.56|-3291.47|
+2451994|28063|2452063|11609|66345|115505|4218|9378|74719|1163832|2011|27971|39|3|17|4|294|95|70|27.23|48.19|25.54|1585.50|1787.80|1906.10|3373.30|0.00|214.53|235.90|1573.27|1573.27|1809.17|1809.17|-332.83|
+2451994|28063|2452041|17461|66345|115505|4218|9378|74719|1163832|2011|27971|17|3|15|1|214|95|17|17.17|46.01|24.84|359.89|422.28|291.89|782.17|1.94|325.15|86.02|97.13|99.07|183.15|185.09|-194.76|
+2451994|28063|2452069|5857|66345|115505|4218|9378|74719|1163832|2011|27971|15|15|8|3|278|95|61|83.76|226.15|90.46|8277.09|5518.06|5109.36|13795.15|379.64|772.52|6207.36|4745.54|5125.18|10952.90|11332.54|-363.82|
+2451994|28063|2451998|15813|66345|115505|4218|9378|74719|1163832|2011|27971|23|19|8|3|251|95|75|90.47|264.17|184.91|5944.50|13868.25|6785.25|19812.75|554.73|0.00|6141.75|13868.25|14422.98|20010.00|20564.73|7083.00|
+2451994|28063|2452101|3493|66345|115505|4218|9378|74719|1163832|2011|27971|3|5|1|2|73|95|23|42.55|69.35|6.24|1451.53|143.52|978.65|1595.05|8.61|0.00|270.94|143.52|152.13|414.46|423.07|-835.13|
+2451994|28063|2452044|3201|66345|115505|4218|9378|74719|1163832|2011|27971|35|29|2|4|133|95|46|76.70|125.78|62.89|2892.94|2892.94|3528.20|5785.88|144.64|0.00|1330.32|2892.94|3037.58|4223.26|4367.90|-635.26|
+2451994|28063|2452097|2515|66345|115505|4218|9378|74719|1163832|2011|27971|59|7|18|2|196|95|1|48.80|121.02|62.93|58.09|62.93|48.80|121.02|5.66|0.00|13.31|62.93|68.59|76.24|81.90|14.13|
+2451994|28063|2452003|4185|66345|115505|4218|9378|74719|1163832|2011|27971|19|21|9|5|201|95|82|57.30|166.17|141.24|2044.26|11581.68|4698.60|13625.94|926.53|0.00|0.00|11581.68|12508.21|11581.68|12508.21|6883.08|
+2451994|28063|2452082|15793|66345|115505|4218|9378|74719|1163832|2011|27971|15|11|17|5|233|95|69|9.73|17.80|4.45|921.15|307.05|671.37|1228.20|14.24|128.96|24.15|178.09|192.33|202.24|216.48|-493.28|
+2451994|28063|2452107|11407|66345|115505|4218|9378|74719|1163832|2011|27971|35|9|19|1|160|95|88|83.98|152.00|147.44|401.28|12974.72|7390.24|13376.00|648.73|0.00|5885.44|12974.72|13623.45|18860.16|19508.89|5584.48|
+2451153|47564|2451160|14839|11730|1455722|867|14059|94985|1532915|5188|44733|43|14|1|2|70|96|44|73.04|151.92|110.90|1804.88|4879.60|3213.76|6684.48|243.98|0.00|534.60|4879.60|5123.58|5414.20|5658.18|1665.84|
+2451153|47564|2451238|5396|11730|1455722|867|14059|94985|1532915|5188|44733|34|2|3|4|5|96|1|31.17|70.44|10.56|59.88|10.56|31.17|70.44|0.52|0.00|21.13|10.56|11.08|31.69|32.21|-20.61|
+2451153|47564|2451267|17290|11730|1455722|867|14059|94985|1532915|5188|44733|58|14|9|2|175|96|41|31.32|82.37|42.83|1621.14|1756.03|1284.12|3377.17|11.58|1176.54|1181.62|579.49|591.07|1761.11|1772.69|-704.63|
+2451153|47564|2451206|12895|11730|1455722|867|14059|94985|1532915|5188|44733|8|8|2|2|139|96|32|69.25|85.87|82.43|110.08|2637.76|2216.00|2747.84|29.01|2057.45|164.80|580.31|609.32|745.11|774.12|-1635.69|
+2451153|47564|2451202|4231|11730|1455722|867|14059|94985|1532915|5188|44733|1|20|14|4|184|96|25|63.03|158.83|38.11|3018.00|952.75|1575.75|3970.75|85.74|0.00|833.75|952.75|1038.49|1786.50|1872.24|-623.00|
+2451153|47564|2451229|13282|11730|1455722|867|14059|94985|1532915|5188|44733|37|26|9|2|292|96|32|97.08|161.15|17.72|4589.76|567.04|3106.56|5156.80|39.69|0.00|2114.24|567.04|606.73|2681.28|2720.97|-2539.52|
+2451153|47564|2451231|2644|11730|1455722|867|14059|94985|1532915|5188|44733|14|19|2|2|129|96|85|54.99|89.08|77.49|985.15|6586.65|4674.15|7571.80|395.19|0.00|1513.85|6586.65|6981.84|8100.50|8495.69|1912.50|
+2451153|47564|2451269|13309|11730|1455722|867|14059|94985|1532915|5188|44733|50|2|8|4|110|96|28|5.29|13.06|4.70|234.08|131.60|148.12|365.68|3.94|0.00|87.64|131.60|135.54|219.24|223.18|-16.52|
+2451153|47564|2451204|7669|11730|1455722|867|14059|94985|1532915|5188|44733|28|25|1|4|187|96|10|97.44|136.41|9.54|1268.70|95.40|974.40|1364.10|0.95|0.00|559.20|95.40|96.35|654.60|655.55|-879.00|
+2451153|47564|2451250|4291|11730|1455722|867|14059|94985|1532915|5188|44733|2|7|3|5|257|96|23|61.56|110.19|69.41|937.94|1596.43|1415.88|2534.37|9.73|622.60|1190.94|973.83|983.56|2164.77|2174.50|-442.05|
+2451153|47564|2451211|17960|11730|1455722|867|14059|94985|1532915|5188|44733|26|20|8|5|2|96|23|82.94|150.12|127.60|517.96|2934.80|1907.62|3452.76|234.78|0.00|103.50|2934.80|3169.58|3038.30|3273.08|1027.18|
+2451153|47564|2451235|2564|11730|1455722|867|14059|94985|1532915|5188|44733|26|1|11|3|75|96|27|98.09|169.69|67.87|2749.14|1832.49|2648.43|4581.63|128.27|0.00|1740.96|1832.49|1960.76|3573.45|3701.72|-815.94|
+2451153|47564|2451180|2956|11730|1455722|867|14059|94985|1532915|5188|44733|19|28|20|5|180|96|14|44.45|120.01|76.80|604.94|1075.20|622.30|1680.14|64.51|0.00|806.40|1075.20|1139.71|1881.60|1946.11|452.90|
+2451153|47564|2451199|1820|11730|1455722|867|14059|94985|1532915|5188|44733|50|16|15|3|251|96|45|5.65|7.06|1.20|263.70|54.00|254.25|317.70|0.19|34.02|76.05|19.98|20.17|96.03|96.22|-234.27|
+2451153|47564|2451170|15667|11730|1455722|867|14059|94985|1532915|5188|44733|20|19|2|4|226|96|43|2.61|4.01|2.24|76.11|96.32|112.23|172.43|1.21|76.09|55.04|20.23|21.44|75.27|76.48|-92.00|
+2452499|43025|2452599|7215|12874|940613|5152|41644|73596|1392185|5261|24553|55|25|11|1|104|97|17|87.83|173.90|99.12|1271.26|1685.04|1493.11|2956.30|151.65|0.00|236.47|1685.04|1836.69|1921.51|2073.16|191.93|
+2452499|43025|2452619|2490|12874|940613|5152|41644|73596|1392185|5261|24553|19|9|20|5|110|97|5|36.97|107.21|38.59|343.10|192.95|184.85|536.05|0.92|177.51|262.65|15.44|16.36|278.09|279.01|-169.41|
+2452499|43025|2452597|12775|12874|940613|5152|41644|73596|1392185|5261|24553|37|6|7|3|82|97|21|53.32|86.37|23.31|1324.26|489.51|1119.72|1813.77|4.89|0.00|0.00|489.51|494.40|489.51|494.40|-630.21|
+2452499|43025|2452510|16795|12874|940613|5152|41644|73596|1392185|5261|24553|9|6|1|4|57|97|7|52.27|59.58|5.95|375.41|41.65|365.89|417.06|1.87|10.41|8.33|31.24|33.11|39.57|41.44|-334.65|
+2452499|43025|2452525|4854|12874|940613|5152|41644|73596|1392185|5261|24553|21|18|17|2|70|97|16|43.63|126.96|44.43|1320.48|710.88|698.08|2031.36|7.10|0.00|304.64|710.88|717.98|1015.52|1022.62|12.80|
+2452499|43025|2452551|12751|12874|940613|5152|41644|73596|1392185|5261|24553|36|24|3|4|105|97|68|57.75|137.44|71.46|4486.64|4859.28|3927.00|9345.92|97.18|0.00|1308.32|4859.28|4956.46|6167.60|6264.78|932.28|
+2452499|43025|2452616|1123|12874|940613|5152|41644|73596|1392185|5261|24553|42|3|6|5|224|97|71|37.79|65.37|55.56|696.51|3944.76|2683.09|4641.27|355.02|0.00|1484.61|3944.76|4299.78|5429.37|5784.39|1261.67|
+2452499|43025|2452554|2493|12874|940613|5152|41644|73596|1392185|5261|24553|27|18|7|2|47|97|25|54.18|104.56|57.50|1176.50|1437.50|1354.50|2614.00|0.00|0.00|548.75|1437.50|1437.50|1986.25|1986.25|83.00|
+2452499|43025|2452557|17769|12874|940613|5152|41644|73596|1392185|5261|24553|18|24|18|4|97|97|7|34.06|45.64|28.29|121.45|198.03|238.42|319.48|5.94|0.00|79.87|198.03|203.97|277.90|283.84|-40.39|
+2451962|57038|2452053|5093|16297|370935|1573|7811|15799|667647|4125|13163|47|17|4|4|197|98|60|59.78|117.76|37.68|4804.80|2260.80|3586.80|7065.60|180.86|0.00|847.80|2260.80|2441.66|3108.60|3289.46|-1326.00|
+2451962|57038|2452024|14793|16297|370935|1573|7811|15799|667647|4125|13163|49|25|11|2|150|98|61|23.45|34.94|12.92|1343.22|788.12|1430.45|2131.34|7.88|0.00|127.49|788.12|796.00|915.61|923.49|-642.33|
+2451962|57038|2452012|7305|16297|370935|1573|7811|15799|667647|4125|13163|59|25|6|4|82|98|22|49.32|118.86|36.84|1804.44|810.48|1085.04|2614.92|8.10|0.00|601.26|810.48|818.58|1411.74|1419.84|-274.56|
+2451962|57038|2452062|6135|16297|370935|1573|7811|15799|667647|4125|13163|19|25|16|4|52|98|22|44.85|69.96|23.08|1031.36|507.76|986.70|1539.12|0.00|0.00|384.78|507.76|507.76|892.54|892.54|-478.94|
+2451962|57038|2452022|3583|16297|370935|1573|7811|15799|667647|4125|13163|31|5|7|1|68|98|7|86.36|217.62|145.80|502.74|1020.60|604.52|1523.34|0.00|0.00|411.25|1020.60|1020.60|1431.85|1431.85|416.08|
+2451962|57038|2452000|11701|16297|370935|1573|7811|15799|667647|4125|13163|1|19|19|3|157|98|57|6.48|12.18|9.25|167.01|527.25|369.36|694.26|31.63|0.00|347.13|527.25|558.88|874.38|906.01|157.89|
+2451962|57038|2452034|397|16297|370935|1573|7811|15799|667647|4125|13163|47|29|13|1|122|98|73|61.75|73.48|2.20|5203.44|160.60|4507.75|5364.04|11.24|0.00|321.20|160.60|171.84|481.80|493.04|-4347.15|
+2451962|57038|2452041|8901|16297|370935|1573|7811|15799|667647|4125|13163|39|23|11|3|145|98|24|93.38|210.10|132.36|1865.76|3176.64|2241.12|5042.40|63.53|0.00|1512.72|3176.64|3240.17|4689.36|4752.89|935.52|
+2451962|57038|2452011|4599|16297|370935|1573|7811|15799|667647|4125|13163|35|15|8|1|271|98|76|60.67|100.10|90.09|760.76|6846.84|4610.92|7607.60|547.74|0.00|380.00|6846.84|7394.58|7226.84|7774.58|2235.92|
+2451962|57038|2452074|17183|16297|370935|1573|7811|15799|667647|4125|13163|5|9|17|1|258|98|71|92.33|141.26|46.61|6720.15|3309.31|6555.43|10029.46|99.27|0.00|802.30|3309.31|3408.58|4111.61|4210.88|-3246.12|
+2451962|57038|2452080|13859|16297|370935|1573|7811|15799|667647|4125|13163|15|11|14|2|270|98|5|66.92|88.33|84.79|17.70|423.95|334.60|441.65|33.91|0.00|0.00|423.95|457.86|423.95|457.86|89.35|
+2451962|57038|2452072|10529|16297|370935|1573|7811|15799|667647|4125|13163|57|9|14|5|136|98|33|99.49|143.26|97.41|1513.05|3214.53|3283.17|4727.58|12.53|1960.86|2221.89|1253.67|1266.20|3475.56|3488.09|-2029.50|
+2451962|57038|2452071|9447|16297|370935|1573|7811|15799|667647|4125|13163|7|17|7|2|130|98|41|22.73|63.64|40.09|965.55|1643.69|931.93|2609.24|0.00|0.00|547.76|1643.69|1643.69|2191.45|2191.45|711.76|
+2451543|25265|2451633|9364|19821|1761697|4373|3049|77876|54460|1124|31000|56|1|6|3|60|99|93|78.00|226.98|199.74|2533.32|18575.82|7254.00|21109.14|105.88|7987.60|6332.37|10588.22|10694.10|16920.59|17026.47|3334.22|
+2451543|25265|2451629|13628|19821|1761697|4373|3049|77876|54460|1124|31000|55|25|11|1|14|99|62|25.08|50.66|40.52|628.68|2512.24|1554.96|3140.92|200.97|0.00|1067.64|2512.24|2713.21|3579.88|3780.85|957.28|
+2451543|25265|2451562|12397|19821|1761697|4373|3049|77876|54460|1124|31000|2|10|17|3|219|99|42|48.19|137.34|105.75|1326.78|4441.50|2023.98|5768.28|0.00|1465.69|461.16|2975.81|2975.81|3436.97|3436.97|951.83|
+2451543|25265|2451619|6244|19821|1761697|4373|3049|77876|54460|1124|31000|8|22|19|4|206|99|20|18.62|47.48|18.99|569.80|379.80|372.40|949.60|19.36|56.97|246.80|322.83|342.19|569.63|588.99|-49.57|
+2451543|25265|2451640|13684|19821|1761697|4373|3049|77876|54460|1124|31000|58|22|14|4|191|99|5|35.16|60.82|55.34|27.40|276.70|175.80|304.10|24.90|0.00|149.00|276.70|301.60|425.70|450.60|100.90|
+2451543|25265|2451655|6950|19821|1761697|4373|3049|77876|54460|1124|31000|52|19|6|4|185|99|29|70.39|175.97|131.97|1276.00|3827.13|2041.31|5103.13|153.08|0.00|1785.82|3827.13|3980.21|5612.95|5766.03|1785.82|
+2451543|25265|2451653|1411|19821|1761697|4373|3049|77876|54460|1124|31000|44|10|19|5|218|99|64|79.63|137.75|26.17|7141.12|1674.88|5096.32|8816.00|133.99|0.00|881.28|1674.88|1808.87|2556.16|2690.15|-3421.44|
+2451543|25265|2451600|10819|19821|1761697|4373|3049|77876|54460|1124|31000|31|14|20|5|7|99|71|16.54|34.56|16.58|1276.58|1177.18|1174.34|2453.76|58.85|0.00|490.61|1177.18|1236.03|1667.79|1726.64|2.84|
+2451543|25265|2451662|11755|19821|1761697|4373|3049|77876|54460|1124|31000|46|14|2|3|297|99|45|73.53|183.08|148.29|1565.55|6673.05|3308.85|8238.60|266.92|0.00|3295.35|6673.05|6939.97|9968.40|10235.32|3364.20|
+2451543|25265|2451600|4777|19821|1761697|4373|3049|77876|54460|1124|31000|32|1|5|4|103|99|86|35.37|78.52|8.63|6010.54|742.18|3041.82|6752.72|22.26|0.00|2565.38|742.18|764.44|3307.56|3329.82|-2299.64|
+2451543|25265|2451643|10922|19821|1761697|4373|3049|77876|54460|1124|31000|10|25|10|2|94|99|87|91.98|102.09|66.35|3109.38|5772.45|8002.26|8881.83|387.90|923.59|976.14|4848.86|5236.76|5825.00|6212.90|-3153.40|
+2451543|25265|2451568|11620|19821|1761697|4373|3049|77876|54460|1124|31000|1|28|19|3|87|99|46|11.57|20.13|13.08|324.30|601.68|532.22|925.98|6.73|517.44|287.04|84.24|90.97|371.28|378.01|-447.98|
+2451489|45554|2451561|15598|87996|1424698|3447|49349|2066|1191653|1455|2957|34|2|6|3|277|100|21|27.35|77.40|33.28|926.52|698.88|574.35|1625.40|20.96|0.00|81.27|698.88|719.84|780.15|801.11|124.53|
+2451489|45554|2451539|8509|87996|1424698|3447|49349|2066|1191653|1455|2957|56|2|5|1|34|100|80|43.50|76.56|48.23|2266.40|3858.40|3480.00|6124.80|192.92|0.00|2816.80|3858.40|4051.32|6675.20|6868.12|378.40|
+2451489|45554|2451569|13148|87996|1424698|3447|49349|2066|1191653|1455|2957|37|2|12|2|25|100|76|99.78|189.58|45.49|10950.84|3457.24|7583.28|14408.08|69.14|0.00|1008.52|3457.24|3526.38|4465.76|4534.90|-4126.04|
+2451489|45554|2451564|17030|87996|1424698|3447|49349|2066|1191653|1455|2957|1|7|17|5|223|100|59|3.45|9.48|1.32|481.44|77.88|203.55|559.32|1.55|0.00|257.24|77.88|79.43|335.12|336.67|-125.67|
+2451489|45554|2451602|15322|87996|1424698|3447|49349|2066|1191653|1455|2957|8|10|10|1|55|100|89|61.04|87.28|9.60|6913.52|854.40|5432.56|7767.92|59.80|0.00|1165.01|854.40|914.20|2019.41|2079.21|-4578.16|
+2451489|45554|2451552|724|87996|1424698|3447|49349|2066|1191653|1455|2957|46|16|4|1|181|100|99|63.05|162.66|58.55|10306.89|5796.45|6241.95|16103.34|64.92|4984.94|3380.85|811.51|876.43|4192.36|4257.28|-5430.44|
+2451489|45554|2451609|1364|87996|1424698|3447|49349|2066|1191653|1455|2957|49|22|6|5|167|100|3|76.54|137.77|79.90|173.61|239.70|229.62|413.31|9.58|0.00|99.18|239.70|249.28|338.88|348.46|10.08|
+2451489|45554|2451542|15442|87996|1424698|3447|49349|2066|1191653|1455|2957|2|14|4|5|139|100|71|13.07|36.20|23.89|874.01|1696.19|927.97|2570.20|84.80|0.00|796.62|1696.19|1780.99|2492.81|2577.61|768.22|
+2451489|45554|2451532|1328|87996|1424698|3447|49349|2066|1191653|1455|2957|22|13|19|4|246|100|86|79.34|153.12|113.30|3424.52|9743.80|6823.24|13168.32|194.87|0.00|6320.14|9743.80|9938.67|16063.94|16258.81|2920.56|
+2451489|45554|2451596|14347|87996|1424698|3447|49349|2066|1191653|1455|2957|40|20|1|1|71|100|21|61.75|160.55|97.93|1315.02|2056.53|1296.75|3371.55|185.08|0.00|640.50|2056.53|2241.61|2697.03|2882.11|759.78|
+2452540|73343|2452639|16620|44578|1919699|2373|21094|44578|1919699|2373|21094|9|27|10|1|62|101|31|85.50|143.64|53.14|2805.50|1647.34|2650.50|4452.84|16.80|807.19|311.55|840.15|856.95|1151.70|1168.50|-1810.35|
+2452540|73343|2452549|10699|44578|1919699|2373|21094|44578|1919699|2373|21094|51|7|15|1|187|101|4|92.93|104.08|55.16|195.68|220.64|371.72|416.32|19.85|0.00|174.84|220.64|240.49|395.48|415.33|-151.08|
+2452540|73343|2452632|15708|44578|1919699|2373|21094|44578|1919699|2373|21094|55|27|10|4|145|101|22|51.47|91.61|83.36|181.50|1833.92|1132.34|2015.42|42.91|403.46|584.32|1430.46|1473.37|2014.78|2057.69|298.12|
+2452540|73343|2452599|9795|44578|1919699|2373|21094|44578|1919699|2373|21094|48|24|13|4|292|101|50|29.22|56.39|27.63|1438.00|1381.50|1461.00|2819.50|13.81|0.00|817.50|1381.50|1395.31|2199.00|2212.81|-79.50|
+2452540|73343|2452589|5989|44578|1919699|2373|21094|44578|1919699|2373|21094|33|12|8|2|139|101|28|29.97|89.91|50.34|1107.96|1409.52|839.16|2517.48|28.19|0.00|679.56|1409.52|1437.71|2089.08|2117.27|570.36|
+2452540|73343|2452647|10687|44578|1919699|2373|21094|44578|1919699|2373|21094|55|1|7|3|116|101|19|86.14|221.37|177.09|841.32|3364.71|1636.66|4206.03|134.58|0.00|925.30|3364.71|3499.29|4290.01|4424.59|1728.05|
+2452540|73343|2452598|13404|44578|1919699|2373|21094|44578|1919699|2373|21094|39|7|18|5|159|101|4|69.96|189.59|145.98|174.44|583.92|279.84|758.36|5.83|0.00|303.32|583.92|589.75|887.24|893.07|304.08|
+2452540|73343|2452626|16807|44578|1919699|2373|21094|44578|1919699|2373|21094|13|13|10|3|143|101|73|49.90|139.72|81.03|4284.37|5915.19|3642.70|10199.56|414.06|0.00|3161.63|5915.19|6329.25|9076.82|9490.88|2272.49|
+2452540|73343|2452599|16297|44578|1919699|2373|21094|44578|1919699|2373|21094|57|3|15|3|251|101|5|59.15|131.31|51.21|400.50|256.05|295.75|656.55|7.68|0.00|13.10|256.05|263.73|269.15|276.83|-39.70|
+2451304|70566|2451368|2684|10393|1718135|1077|18819|16008|411354|845|13387|56|19|18|1|117|102|64|8.30|13.69|12.59|70.40|805.76|531.20|876.16|2.65|539.85|157.44|265.91|268.56|423.35|426.00|-265.29|
+2451304|70566|2451380|16666|10393|1718135|1077|18819|16008|411354|845|13387|1|8|6|5|234|102|29|64.27|187.66|182.03|163.27|5278.87|1863.83|5442.14|316.73|0.00|2612.03|5278.87|5595.60|7890.90|8207.63|3415.04|
+2451304|70566|2451368|7840|10393|1718135|1077|18819|16008|411354|845|13387|37|28|14|5|206|102|100|97.87|126.25|10.10|11615.00|1010.00|9787.00|12625.00|20.20|0.00|126.00|1010.00|1030.20|1136.00|1156.20|-8777.00|
+2451304|70566|2451331|13642|10393|1718135|1077|18819|16008|411354|845|13387|28|10|10|1|55|102|68|70.29|194.00|64.02|8838.64|4353.36|4779.72|13192.00|0.00|1131.87|3034.16|3221.49|3221.49|6255.65|6255.65|-1558.23|
+2451304|70566|2451368|11551|10393|1718135|1077|18819|16008|411354|845|13387|38|14|19|2|55|102|78|70.29|203.13|203.13|0.00|15844.14|5482.62|15844.14|950.64|0.00|5228.34|15844.14|16794.78|21072.48|22023.12|10361.52|
+2451304|70566|2451310|8336|10393|1718135|1077|18819|16008|411354|845|13387|55|16|3|4|65|102|42|51.11|86.37|59.59|1124.76|2502.78|2146.62|3627.54|25.02|0.00|1704.78|2502.78|2527.80|4207.56|4232.58|356.16|
+2451304|70566|2451408|12763|10393|1718135|1077|18819|16008|411354|845|13387|7|10|8|5|132|102|82|41.04|119.42|62.09|4701.06|5091.38|3365.28|9792.44|203.65|0.00|3525.18|5091.38|5295.03|8616.56|8820.21|1726.10|
+2451304|70566|2451401|16840|10393|1718135|1077|18819|16008|411354|845|13387|28|1|4|5|56|102|48|18.86|51.29|17.43|1625.28|836.64|905.28|2461.92|41.83|0.00|1107.84|836.64|878.47|1944.48|1986.31|-68.64|
+2451304|70566|2451389|1750|10393|1718135|1077|18819|16008|411354|845|13387|14|7|10|1|202|102|46|25.47|55.01|49.50|253.46|2277.00|1171.62|2530.46|50.54|1434.51|25.30|842.49|893.03|867.79|918.33|-329.13|
+2451304|70566|2451393|1904|10393|1718135|1077|18819|16008|411354|845|13387|58|25|6|4|262|102|22|79.71|216.01|153.36|1378.30|3373.92|1753.62|4752.22|168.69|0.00|1140.48|3373.92|3542.61|4514.40|4683.09|1620.30|
+2451304|70566|2451306|4153|10393|1718135|1077|18819|16008|411354|845|13387|37|8|8|4|300|102|66|40.18|73.93|27.35|3074.28|1805.10|2651.88|4879.38|0.00|0.00|390.06|1805.10|1805.10|2195.16|2195.16|-846.78|
+2451304|70566|2451317|2420|10393|1718135|1077|18819|16008|411354|845|13387|37|4|20|4|98|102|98|79.64|166.44|6.65|15659.42|651.70|7804.72|16311.12|4.10|241.12|3261.44|410.58|414.68|3672.02|3676.12|-7394.14|
+2451304|70566|2451357|14186|10393|1718135|1077|18819|16008|411354|845|13387|4|25|13|3|163|102|31|26.89|43.29|14.28|899.31|442.68|833.59|1341.99|4.42|0.00|630.54|442.68|447.10|1073.22|1077.64|-390.91|
+2451801|66921|2451807|9499|49335|1467614|2693|31772|92727|1146590|5056|8982|25|1|9|2|265|103|63|67.89|105.90|31.77|4670.19|2001.51|4277.07|6671.70|60.04|0.00|1267.56|2001.51|2061.55|3269.07|3329.11|-2275.56|
+2451801|66921|2451869|12812|49335|1467614|2693|31772|92727|1146590|5056|8982|13|11|15|2|236|103|38|11.96|22.72|22.49|8.74|854.62|454.48|863.36|59.82|0.00|379.62|854.62|914.44|1234.24|1294.06|400.14|
+2451801|66921|2451913|15851|49335|1467614|2693|31772|92727|1146590|5056|8982|31|5|13|3|273|103|17|21.30|28.75|6.32|381.31|107.44|362.10|488.75|3.22|0.00|24.31|107.44|110.66|131.75|134.97|-254.66|
+2451801|66921|2451818|13183|49335|1467614|2693|31772|92727|1146590|5056|8982|11|13|8|4|153|103|42|90.25|91.15|29.16|2603.58|1224.72|3790.50|3828.30|36.74|0.00|306.18|1224.72|1261.46|1530.90|1567.64|-2565.78|
+2451801|66921|2451809|9577|49335|1467614|2693|31772|92727|1146590|5056|8982|2|13|5|3|271|103|53|24.93|29.91|16.15|729.28|855.95|1321.29|1585.23|8.55|0.00|443.61|855.95|864.50|1299.56|1308.11|-465.34|
+2451801|66921|2451812|13781|49335|1467614|2693|31772|92727|1146590|5056|8982|2|8|8|1|141|103|34|67.19|124.97|77.48|1614.66|2634.32|2284.46|4248.98|56.90|1685.96|1317.16|948.36|1005.26|2265.52|2322.42|-1336.10|
+2451801|66921|2451855|12643|49335|1467614|2693|31772|92727|1146590|5056|8982|59|26|1|5|109|103|80|38.90|93.36|41.07|4183.20|3285.60|3112.00|7468.80|165.59|1445.66|3360.80|1839.94|2005.53|5200.74|5366.33|-1272.06|
+2451801|66921|2451853|697|49335|1467614|2693|31772|92727|1146590|5056|8982|31|8|14|3|32|103|45|97.50|206.70|128.15|3534.75|5766.75|4387.50|9301.50|0.00|0.00|4185.45|5766.75|5766.75|9952.20|9952.20|1379.25|
+2451801|66921|2451806|8258|49335|1467614|2693|31772|92727|1146590|5056|8982|13|19|6|4|5|103|9|13.43|29.00|28.13|7.83|253.17|120.87|261.00|22.78|0.00|39.15|253.17|275.95|292.32|315.10|132.30|
+2451801|66921|2451843|13649|49335|1467614|2693|31772|92727|1146590|5056|8982|11|19|4|5|209|103|10|11.20|24.19|20.31|38.80|203.10|112.00|241.90|4.06|0.00|53.20|203.10|207.16|256.30|260.36|91.10|
+2451801|66921|2451868|10775|49335|1467614|2693|31772|92727|1146590|5056|8982|13|7|11|3|235|103|30|74.32|217.75|71.85|4377.00|2155.50|2229.60|6532.50|43.11|0.00|391.80|2155.50|2198.61|2547.30|2590.41|-74.10|
+2451801|66921|2451821|4183|49335|1467614|2693|31772|92727|1146590|5056|8982|26|17|14|1|1|103|45|39.20|111.32|38.96|3256.20|1753.20|1764.00|5009.40|105.19|0.00|250.20|1753.20|1858.39|2003.40|2108.59|-10.80|
+2451801|66921|2451876|11579|49335|1467614|2693|31772|92727|1146590|5056|8982|13|20|12|3|187|103|14|40.94|73.69|26.52|660.38|371.28|573.16|1031.66|3.71|0.00|92.82|371.28|374.99|464.10|467.81|-201.88|
+2451801|66921|2451838|3997|49335|1467614|2693|31772|92727|1146590|5056|8982|8|8|12|2|229|103|36|67.84|107.86|4.31|3727.80|155.16|2442.24|3882.96|9.30|0.00|1669.32|155.16|164.46|1824.48|1833.78|-2287.08|
+2451399|78997|2451452|2566|20053|1385627|1|45975|74401|1501553|5944|47518|25|16|5|1|117|104|66|19.51|25.36|20.54|318.12|1355.64|1287.66|1673.76|0.00|0.00|485.10|1355.64|1355.64|1840.74|1840.74|67.98|
+2451399|78997|2451457|16580|20053|1385627|1|45975|74401|1501553|5944|47518|37|13|2|5|128|104|64|15.97|28.10|20.51|485.76|1312.64|1022.08|1798.40|65.63|0.00|449.28|1312.64|1378.27|1761.92|1827.55|290.56|
+2451399|78997|2451473|7327|20053|1385627|1|45975|74401|1501553|5944|47518|50|20|3|5|250|104|45|15.18|16.84|0.50|735.30|22.50|683.10|757.80|1.35|0.00|280.35|22.50|23.85|302.85|304.20|-660.60|
+2451399|78997|2451400|1975|20053|1385627|1|45975|74401|1501553|5944|47518|19|28|17|4|194|104|30|19.45|28.59|19.44|274.50|583.20|583.50|857.70|40.82|0.00|265.80|583.20|624.02|849.00|889.82|-0.30|
+2451399|78997|2451488|7981|20053|1385627|1|45975|74401|1501553|5944|47518|10|28|11|5|109|104|93|57.81|126.60|86.08|3768.36|8005.44|5376.33|11773.80|80.05|0.00|5650.68|8005.44|8085.49|13656.12|13736.17|2629.11|
+2451399|78997|2451419|5366|20053|1385627|1|45975|74401|1501553|5944|47518|13|20|15|4|83|104|20|71.81|113.45|38.57|1497.60|771.40|1436.20|2269.00|23.14|0.00|22.60|771.40|794.54|794.00|817.14|-664.80|
+2451399|78997|2451460|836|20053|1385627|1|45975|74401|1501553|5944|47518|2|25|17|1|272|104|29|33.43|60.84|38.93|635.39|1128.97|969.47|1764.36|2.48|880.59|70.47|248.38|250.86|318.85|321.33|-721.09|
+2451399|78997|2451415|28|20053|1385627|1|45975|74401|1501553|5944|47518|13|7|8|5|212|104|93|61.68|119.65|71.79|4450.98|6676.47|5736.24|11127.45|97.47|1802.64|3783.24|4873.83|4971.30|8657.07|8754.54|-862.41|
+2451399|78997|2451505|15856|20053|1385627|1|45975|74401|1501553|5944|47518|44|4|16|4|162|104|43|46.75|67.32|28.27|1679.15|1215.61|2010.25|2894.76|97.24|0.00|781.31|1215.61|1312.85|1996.92|2094.16|-794.64|
+2451399|78997|2451446|1808|20053|1385627|1|45975|74401|1501553|5944|47518|22|10|13|2|274|104|48|96.88|268.35|83.18|8888.16|3992.64|4650.24|12880.80|39.92|0.00|3219.84|3992.64|4032.56|7212.48|7252.40|-657.60|
+2451399|78997|2451442|11455|20053|1385627|1|45975|74401|1501553|5944|47518|46|2|7|5|33|104|46|13.01|35.77|24.32|526.70|1118.72|598.46|1645.42|78.31|0.00|822.48|1118.72|1197.03|1941.20|2019.51|520.26|
+2452631|48867|2452639|4227|53098|1635631|6517|3050|32917|1061448|5802|47535|30|18|3|2|51|105|61|79.32|134.05|50.93|5070.32|3106.73|4838.52|8177.05|186.40|0.00|3516.04|3106.73|3293.13|6622.77|6809.17|-1731.79|
+2452631|48867|2452729|12030|53098|1635631|6517|3050|32917|1061448|5802|47535|54|19|6|4|72|105|68|43.97|58.04|9.28|3315.68|631.04|2989.96|3946.72|45.43|63.10|1736.04|567.94|613.37|2303.98|2349.41|-2422.02|
+2452631|48867|2452667|6007|53098|1635631|6517|3050|32917|1061448|5802|47535|48|15|19|3|214|105|58|87.04|208.02|191.37|965.70|11099.46|5048.32|12065.16|332.98|0.00|2895.36|11099.46|11432.44|13994.82|14327.80|6051.14|
+2452631|48867|2452667|16545|53098|1635631|6517|3050|32917|1061448|5802|47535|57|15|17|1|215|105|86|32.79|37.05|22.97|1210.88|1975.42|2819.94|3186.30|79.01|0.00|381.84|1975.42|2054.43|2357.26|2436.27|-844.52|
+2452631|48867|2452709|3009|53098|1635631|6517|3050|32917|1061448|5802|47535|1|18|7|5|26|105|11|13.74|36.13|6.50|325.93|71.50|151.14|397.43|1.43|0.00|150.92|71.50|72.93|222.42|223.85|-79.64|
+2452631|48867|2452672|10311|53098|1635631|6517|3050|32917|1061448|5802|47535|51|1|19|4|122|105|65|55.00|164.45|23.02|9192.95|1496.30|3575.00|10689.25|74.81|0.00|5130.45|1496.30|1571.11|6626.75|6701.56|-2078.70|
+2452631|48867|2452724|14829|53098|1635631|6517|3050|32917|1061448|5802|47535|18|9|11|5|106|105|79|38.58|42.05|36.58|432.13|2889.82|3047.82|3321.95|115.59|0.00|1494.68|2889.82|3005.41|4384.50|4500.09|-158.00|
+2452631|48867|2452677|253|53098|1635631|6517|3050|32917|1061448|5802|47535|37|21|3|5|227|105|16|50.94|69.27|16.62|842.40|265.92|815.04|1108.32|13.29|0.00|398.88|265.92|279.21|664.80|678.09|-549.12|
+2451543|52256|2451607|7480|64090|1631987|5931|5828|68628|931425|4366|33910|46|4|15|2|99|106|2|58.91|174.37|31.38|285.98|62.76|117.82|348.74|1.58|40.16|83.68|22.60|24.18|106.28|107.86|-95.22|
+2451543|52256|2451635|6049|64090|1631987|5931|5828|68628|931425|4366|33910|25|4|13|5|100|106|61|40.57|87.63|53.45|2084.98|3260.45|2474.77|5345.43|36.51|2738.77|53.07|521.68|558.19|574.75|611.26|-1953.09|
+2451543|52256|2451610|12253|64090|1631987|5931|5828|68628|931425|4366|33910|26|22|13|4|105|106|38|27.85|37.59|5.26|1228.54|199.88|1058.30|1428.42|9.99|0.00|471.20|199.88|209.87|671.08|681.07|-858.42|
+2451543|52256|2451597|14788|64090|1631987|5931|5828|68628|931425|4366|33910|32|14|19|5|131|106|99|60.39|169.69|6.78|16128.09|671.22|5978.61|16799.31|0.00|0.00|1175.13|671.22|671.22|1846.35|1846.35|-5307.39|
+2451543|52256|2451660|15571|64090|1631987|5931|5828|68628|931425|4366|33910|25|13|4|3|132|106|7|59.17|130.17|0.00|911.19|0.00|414.19|911.19|0.00|0.00|346.22|0.00|0.00|346.22|346.22|-414.19|
+2451543|52256|2451656|8302|64090|1631987|5931|5828|68628|931425|4366|33910|14|22|3|2|248|106|23|13.54|38.31|9.57|661.02|220.11|311.42|881.13|6.60|0.00|26.22|220.11|226.71|246.33|252.93|-91.31|
+2451543|52256|2451574|4561|64090|1631987|5931|5828|68628|931425|4366|33910|4|26|12|3|271|106|70|72.72|212.34|188.98|1635.20|13228.60|5090.40|14863.80|132.28|0.00|1337.70|13228.60|13360.88|14566.30|14698.58|8138.20|
+2451543|52256|2451559|5906|64090|1631987|5931|5828|68628|931425|4366|33910|7|28|3|4|235|106|17|93.18|165.86|14.92|2565.98|253.64|1584.06|2819.62|12.68|0.00|873.97|253.64|266.32|1127.61|1140.29|-1330.42|
+2451543|52256|2451551|11128|64090|1631987|5931|5828|68628|931425|4366|33910|26|16|11|3|135|106|84|74.07|186.65|63.46|10347.96|5330.64|6221.88|15678.60|266.53|0.00|3762.36|5330.64|5597.17|9093.00|9359.53|-891.24|
+2451543|52256|2451624|13357|64090|1631987|5931|5828|68628|931425|4366|33910|49|10|14|5|196|106|10|26.53|49.87|40.89|89.80|408.90|265.30|498.70|8.17|0.00|4.90|408.90|417.07|413.80|421.97|143.60|
+2451543|52256|2451661|10306|64090|1631987|5931|5828|68628|931425|4366|33910|1|2|3|4|275|106|87|66.12|76.69|71.32|467.19|6204.84|5752.44|6672.03|62.04|0.00|1133.61|6204.84|6266.88|7338.45|7400.49|452.40|
+2452514|44258|2452551|9702|84254|286093|2332|40971|56255|1211796|3288|16280|21|21|14|2|50|107|87|81.12|200.36|4.00|17083.32|348.00|7057.44|17431.32|0.00|0.00|8540.79|348.00|348.00|8888.79|8888.79|-6709.44|
+2452514|44258|2452631|1641|84254|286093|2332|40971|56255|1211796|3288|16280|33|25|9|2|68|107|6|32.83|52.85|41.22|69.78|247.32|196.98|317.10|0.00|0.00|34.86|247.32|247.32|282.18|282.18|50.34|
+2452514|44258|2452551|4099|84254|286093|2332|40971|56255|1211796|3288|16280|51|15|17|1|268|107|13|8.21|13.13|0.52|163.93|6.76|106.73|170.69|0.00|0.00|27.30|6.76|6.76|34.06|34.06|-99.97|
+2452514|44258|2452582|8958|84254|286093|2332|40971|56255|1211796|3288|16280|45|24|11|2|256|107|68|68.75|76.31|67.15|622.88|4566.20|4675.00|5189.08|0.00|4246.56|2542.52|319.64|319.64|2862.16|2862.16|-4355.36|
+2452514|44258|2452527|17131|84254|286093|2332|40971|56255|1211796|3288|16280|36|7|3|4|8|107|81|71.52|142.32|96.77|3689.55|7838.37|5793.12|11527.92|391.91|0.00|2536.11|7838.37|8230.28|10374.48|10766.39|2045.25|
+2452514|44258|2452571|7831|84254|286093|2332|40971|56255|1211796|3288|16280|24|19|15|3|228|107|75|14.97|24.25|10.42|1037.25|781.50|1122.75|1818.75|31.26|0.00|836.25|781.50|812.76|1617.75|1649.01|-341.25|
+2452514|44258|2452613|12825|84254|286093|2332|40971|56255|1211796|3288|16280|36|12|7|5|23|107|70|26.02|34.60|5.53|2034.90|387.10|1821.40|2422.00|27.09|0.00|1040.90|387.10|414.19|1428.00|1455.09|-1434.30|
+2452514|44258|2452574|3745|84254|286093|2332|40971|56255|1211796|3288|16280|55|27|3|4|78|107|45|76.52|205.07|166.10|1753.65|7474.50|3443.40|9228.15|597.96|0.00|1476.45|7474.50|8072.46|8950.95|9548.91|4031.10|
+2451262|36450|2451375|16|64734|1100959|1184|28609|89258|1248852|2084|47109|20|20|4|4|237|108|4|1.52|3.72|2.93|3.16|11.72|6.08|14.88|0.82|0.00|6.84|11.72|12.54|18.56|19.38|5.64|
+2451262|36450|2451378|2408|64734|1100959|1184|28609|89258|1248852|2084|47109|50|2|8|4|80|108|42|57.11|100.51|2.01|4137.00|84.42|2398.62|4221.42|0.00|0.00|379.68|84.42|84.42|464.10|464.10|-2314.20|
+2451262|36450|2451360|16178|64734|1100959|1184|28609|89258|1248852|2084|47109|37|28|18|4|130|108|90|22.11|55.93|50.33|504.00|4529.70|1989.90|5033.70|407.67|0.00|804.60|4529.70|4937.37|5334.30|5741.97|2539.80|
+2451262|36450|2451305|11641|64734|1100959|1184|28609|89258|1248852|2084|47109|28|4|11|2|230|108|36|8.38|8.54|0.42|292.32|15.12|301.68|307.44|0.30|0.00|73.44|15.12|15.42|88.56|88.86|-286.56|
+2451262|36450|2451354|1897|64734|1100959|1184|28609|89258|1248852|2084|47109|1|28|19|3|200|108|90|20.82|27.27|25.08|197.10|2257.20|1873.80|2454.30|180.57|0.00|564.30|2257.20|2437.77|2821.50|3002.07|383.40|
+2451262|36450|2451281|1465|64734|1100959|1184|28609|89258|1248852|2084|47109|32|26|18|5|72|108|2|65.83|168.52|0.00|337.04|0.00|131.66|337.04|0.00|0.00|20.22|0.00|0.00|20.22|20.22|-131.66|
+2451262|36450|2451344|16429|64734|1100959|1184|28609|89258|1248852|2084|47109|56|14|17|1|275|108|25|78.25|155.71|45.15|2764.00|1128.75|1956.25|3892.75|67.72|0.00|1868.50|1128.75|1196.47|2997.25|3064.97|-827.50|
+2451262|36450|2451322|9544|64734|1100959|1184|28609|89258|1248852|2084|47109|40|4|12|4|16|108|12|59.96|137.90|119.97|215.16|1439.64|719.52|1654.80|86.37|0.00|463.32|1439.64|1526.01|1902.96|1989.33|720.12|
+2451262|36450|2451348|5617|64734|1100959|1184|28609|89258|1248852|2084|47109|38|28|20|5|180|108|3|20.17|56.87|7.96|146.73|23.88|60.51|170.61|0.71|0.00|56.28|23.88|24.59|80.16|80.87|-36.63|
+2451262|36450|2451285|10292|64734|1100959|1184|28609|89258|1248852|2084|47109|31|19|4|3|112|108|91|1.49|2.74|0.43|210.21|39.13|135.59|249.34|0.78|0.00|57.33|39.13|39.91|96.46|97.24|-96.46|
+2451262|36450|2451378|14335|64734|1100959|1184|28609|89258|1248852|2084|47109|25|26|15|1|47|108|80|33.54|74.12|42.98|2491.20|3438.40|2683.20|5929.60|275.07|0.00|2016.00|3438.40|3713.47|5454.40|5729.47|755.20|
+2451262|36450|2451334|2402|64734|1100959|1184|28609|89258|1248852|2084|47109|34|8|2|4|266|108|97|76.34|90.08|36.93|5155.55|3582.21|7404.98|8737.76|35.82|0.00|436.50|3582.21|3618.03|4018.71|4054.53|-3822.77|
+2451734|36966|2451816|1841|44486|616499|6559|1019|74634|1620204|4752|3965|2|17|6|3|300|109|54|79.85|189.24|52.98|7358.04|2860.92|4311.90|10218.96|257.48|0.00|3474.36|2860.92|3118.40|6335.28|6592.76|-1450.98|
+2451734|36966|2451784|9719|44486|616499|6559|1019|74634|1620204|4752|3965|50|29|1|2|273|109|36|61.67|178.84|110.88|2446.56|3991.68|2220.12|6438.24|319.33|0.00|3090.24|3991.68|4311.01|7081.92|7401.25|1771.56|
+2451734|36966|2451768|17579|44486|616499|6559|1019|74634|1620204|4752|3965|26|29|6|2|205|109|48|18.39|45.60|39.21|306.72|1882.08|882.72|2188.80|37.64|0.00|678.24|1882.08|1919.72|2560.32|2597.96|999.36|
+2451734|36966|2451848|7657|44486|616499|6559|1019|74634|1620204|4752|3965|26|25|2|1|280|109|60|22.50|46.80|19.18|1657.20|1150.80|1350.00|2808.00|67.66|184.12|0.00|966.68|1034.34|966.68|1034.34|-383.32|
+2451734|36966|2451854|8921|44486|616499|6559|1019|74634|1620204|4752|3965|59|23|17|4|191|109|9|58.35|61.85|40.82|189.27|367.38|525.15|556.65|3.67|0.00|55.62|367.38|371.05|423.00|426.67|-157.77|
+2451734|36966|2451853|10304|44486|616499|6559|1019|74634|1620204|4752|3965|35|7|3|4|135|109|62|47.58|56.14|25.82|1879.84|1600.84|2949.96|3480.68|64.03|0.00|835.14|1600.84|1664.87|2435.98|2500.01|-1349.12|
+2451734|36966|2451801|17033|44486|616499|6559|1019|74634|1620204|4752|3965|44|2|14|3|112|109|83|25.29|58.41|35.04|1939.71|2908.32|2099.07|4848.03|145.41|0.00|678.11|2908.32|3053.73|3586.43|3731.84|809.25|
+2451734|36966|2451811|17624|44486|616499|6559|1019|74634|1620204|4752|3965|47|11|15|2|109|109|69|11.22|32.08|27.58|310.50|1903.02|774.18|2213.52|20.55|875.38|287.73|1027.64|1048.19|1315.37|1335.92|253.46|
+2451734|36966|2451751|16097|44486|616499|6559|1019|74634|1620204|4752|3965|59|26|11|5|139|109|32|44.18|69.80|23.03|1496.64|736.96|1413.76|2233.60|29.47|0.00|558.40|736.96|766.43|1295.36|1324.83|-676.80|
+2451734|36966|2451763|5587|44486|616499|6559|1019|74634|1620204|4752|3965|56|20|20|2|114|109|26|73.70|168.03|166.34|43.94|4324.84|1916.20|4368.78|259.49|0.00|1572.74|4324.84|4584.33|5897.58|6157.07|2408.64|
+2451734|36966|2451774|4003|44486|616499|6559|1019|74634|1620204|4752|3965|50|13|15|3|241|109|97|58.70|78.65|47.97|2975.96|4653.09|5693.90|7629.05|372.24|0.00|152.29|4653.09|5025.33|4805.38|5177.62|-1040.81|
+2451734|36966|2451793|8551|44486|616499|6559|1019|74634|1620204|4752|3965|49|20|17|3|149|109|17|53.85|54.92|25.81|494.87|438.77|915.45|933.64|13.16|0.00|18.53|438.77|451.93|457.30|470.46|-476.68|
+2451734|36966|2451804|14111|44486|616499|6559|1019|74634|1620204|4752|3965|55|1|17|5|26|109|33|14.68|42.86|30.85|396.33|1018.05|484.44|1414.38|20.36|0.00|254.43|1018.05|1038.41|1272.48|1292.84|533.61|
+2451734|36966|2451754|14189|44486|616499|6559|1019|74634|1620204|4752|3965|35|11|4|4|277|109|70|20.80|41.60|2.08|2766.40|145.60|1456.00|2912.00|10.19|0.00|698.60|145.60|155.79|844.20|854.39|-1310.40|
+2451734|36966|2451839|1970|44486|616499|6559|1019|74634|1620204|4752|3965|5|23|4|1|25|109|11|88.95|128.97|82.54|510.73|907.94|978.45|1418.67|45.39|0.00|0.00|907.94|953.33|907.94|953.33|-70.51|
+2451734|36966|2451828|4805|44486|616499|6559|1019|74634|1620204|4752|3965|50|13|11|2|26|109|87|89.49|259.52|41.52|18966.00|3612.24|7785.63|22578.24|97.53|361.22|7676.01|3251.02|3348.55|10927.03|11024.56|-4534.61|
+2451904|38622|2452018|9077|11364|1823136|508|280|1585|1022397|1196|2168|55|5|8|3|288|110|36|8.73|24.96|15.22|350.64|547.92|314.28|898.56|43.83|0.00|305.28|547.92|591.75|853.20|897.03|233.64|
+2451904|38622|2451952|7544|11364|1823136|508|280|1585|1022397|1196|2168|53|20|12|1|67|110|46|94.91|282.83|280.00|130.18|12880.00|4365.86|13010.18|644.00|0.00|5203.98|12880.00|13524.00|18083.98|18727.98|8514.14|
+2451904|38622|2451970|2309|11364|1823136|508|280|1585|1022397|1196|2168|35|23|3|4|98|110|18|93.44|231.73|222.46|166.86|4004.28|1681.92|4171.14|80.08|0.00|583.92|4004.28|4084.36|4588.20|4668.28|2322.36|
+2451904|38622|2452021|13505|11364|1823136|508|280|1585|1022397|1196|2168|20|20|18|3|48|110|4|23.94|44.28|33.65|42.52|134.60|95.76|177.12|6.73|0.00|5.28|134.60|141.33|139.88|146.61|38.84|
+2451904|38622|2451947|3020|11364|1823136|508|280|1585|1022397|1196|2168|8|25|4|2|293|110|93|63.14|70.71|0.70|6510.93|65.10|5872.02|6576.03|2.60|0.00|1709.34|65.10|67.70|1774.44|1777.04|-5806.92|
+2451904|38622|2451963|1196|11364|1823136|508|280|1585|1022397|1196|2168|43|7|16|3|10|110|35|96.55|100.41|55.22|1581.65|1932.70|3379.25|3514.35|1.93|1836.06|1335.25|96.64|98.57|1431.89|1433.82|-3282.61|
+2451904|38622|2451915|6331|11364|1823136|508|280|1585|1022397|1196|2168|11|17|5|3|34|110|90|69.39|172.08|86.04|7743.60|7743.60|6245.10|15487.20|542.05|0.00|1083.60|7743.60|8285.65|8827.20|9369.25|1498.50|
+2451904|38622|2451968|968|11364|1823136|508|280|1585|1022397|1196|2168|50|1|9|4|182|110|9|30.38|49.82|49.82|0.00|448.38|273.42|448.38|13.45|0.00|197.28|448.38|461.83|645.66|659.11|174.96|
+2451904|38622|2451959|13127|11364|1823136|508|280|1585|1022397|1196|2168|37|20|20|2|294|110|12|83.42|115.11|112.80|27.72|1353.60|1001.04|1381.32|40.60|0.00|469.56|1353.60|1394.20|1823.16|1863.76|352.56|
+2451904|38622|2451963|6761|11364|1823136|508|280|1585|1022397|1196|2168|55|29|2|4|149|110|45|18.11|51.43|34.97|740.70|1573.65|814.95|2314.35|78.68|0.00|486.00|1573.65|1652.33|2059.65|2138.33|758.70|
+2451904|38622|2452001|11630|11364|1823136|508|280|1585|1022397|1196|2168|17|8|18|4|247|110|53|95.41|209.90|159.52|2670.14|8454.56|5056.73|11124.70|338.18|0.00|2224.94|8454.56|8792.74|10679.50|11017.68|3397.83|
+2451904|38622|2451975|15901|11364|1823136|508|280|1585|1022397|1196|2168|50|7|5|3|81|110|42|85.97|112.62|29.28|3500.28|1229.76|3610.74|4730.04|86.08|0.00|2081.10|1229.76|1315.84|3310.86|3396.94|-2380.98|
+2451904|38622|2451974|17156|11364|1823136|508|280|1585|1022397|1196|2168|13|23|17|2|27|110|86|89.21|96.34|63.58|2817.36|5467.88|7672.06|8285.24|313.85|984.21|2070.88|4483.67|4797.52|6554.55|6868.40|-3188.39|
+2452248|60861|2452283|14015|95403|1522037|5024|7658|52062|966341|5866|34900|15|11|4|2|113|111|5|2.28|6.36|6.16|1.00|30.80|11.40|31.80|0.30|0.00|15.90|30.80|31.10|46.70|47.00|19.40|
+2452248|60861|2452284|5785|95403|1522037|5024|7658|52062|966341|5866|34900|53|5|16|2|88|111|10|3.19|4.68|0.65|40.30|6.50|31.90|46.80|0.13|0.00|6.00|6.50|6.63|12.50|12.63|-25.40|
+2452248|60861|2452311|8125|95403|1522037|5024|7658|52062|966341|5866|34900|45|25|3|3|43|111|62|52.55|118.23|96.94|1319.98|6010.28|3258.10|7330.26|420.71|0.00|1979.04|6010.28|6430.99|7989.32|8410.03|2752.18|
+2452248|60861|2452252|5099|95403|1522037|5024|7658|52062|966341|5866|34900|35|23|6|5|271|111|5|96.31|205.14|108.72|482.10|543.60|481.55|1025.70|5.43|0.00|0.00|543.60|549.03|543.60|549.03|62.05|
+2452248|60861|2452320|6593|95403|1522037|5024|7658|52062|966341|5866|34900|45|25|13|5|260|111|93|97.38|282.40|220.27|5778.09|20485.11|9056.34|26263.20|409.70|0.00|9192.12|20485.11|20894.81|29677.23|30086.93|11428.77|
+2452248|60861|2452346|11909|95403|1522037|5024|7658|52062|966341|5866|34900|49|3|16|3|300|111|19|4.25|6.03|1.92|78.09|36.48|80.75|114.57|0.50|19.69|37.62|16.79|17.29|54.41|54.91|-63.96|
+2452248|60861|2452252|1219|95403|1522037|5024|7658|52062|966341|5866|34900|21|15|15|2|269|111|61|60.64|79.43|43.68|2180.75|2664.48|3699.04|4845.23|186.51|0.00|823.50|2664.48|2850.99|3487.98|3674.49|-1034.56|
+2452248|60861|2452301|607|95403|1522037|5024|7658|52062|966341|5866|34900|9|23|9|4|236|111|28|98.39|225.31|6.75|6119.68|189.00|2754.92|6308.68|15.12|0.00|2901.92|189.00|204.12|3090.92|3106.04|-2565.92|
+2452248|60861|2452328|2369|95403|1522037|5024|7658|52062|966341|5866|34900|7|7|14|4|274|111|52|1.53|4.08|0.81|170.04|42.12|79.56|212.16|0.84|0.00|22.88|42.12|42.96|65.00|65.84|-37.44|
+2452248|60861|2452276|12715|95403|1522037|5024|7658|52062|966341|5866|34900|19|13|1|3|192|111|51|60.51|101.65|64.03|1918.62|3265.53|3086.01|5184.15|11.75|3069.59|1554.99|195.94|207.69|1750.93|1762.68|-2890.07|
+2452248|60861|2452362|14245|95403|1522037|5024|7658|52062|966341|5866|34900|25|21|2|5|208|111|53|12.46|23.54|11.29|649.25|598.37|660.38|1247.62|47.86|0.00|162.18|598.37|646.23|760.55|808.41|-62.01|
+2452248|60861|2452363|11263|95403|1522037|5024|7658|52062|966341|5866|34900|59|23|14|3|25|111|99|40.55|66.50|65.17|131.67|6451.83|4014.45|6583.50|63.22|3290.43|723.69|3161.40|3224.62|3885.09|3948.31|-853.05|
+2451102|51665|2451114|3932|38145|706220|7095|3217|18735|1616489|5889|28764|20|22|20|3|149|112|77|33.03|36.00|1.80|2633.40|138.60|2543.31|2772.00|5.54|0.00|471.24|138.60|144.14|609.84|615.38|-2404.71|
+2451102|51665|2451146|11294|38145|706220|7095|3217|18735|1616489|5889|28764|22|28|8|3|271|112|75|36.50|86.87|83.39|261.00|6254.25|2737.50|6515.25|562.88|0.00|1954.50|6254.25|6817.13|8208.75|8771.63|3516.75|
+2451102|51665|2451207|8833|38145|706220|7095|3217|18735|1616489|5889|28764|13|28|6|4|212|112|22|86.95|149.55|0.00|3290.10|0.00|1912.90|3290.10|0.00|0.00|1348.82|0.00|0.00|1348.82|1348.82|-1912.90|
+2451102|51665|2451200|9751|38145|706220|7095|3217|18735|1616489|5889|28764|2|28|8|1|181|112|14|23.44|30.94|16.08|208.04|225.12|328.16|433.16|6.75|0.00|0.00|225.12|231.87|225.12|231.87|-103.04|
+2451102|51665|2451169|5707|38145|706220|7095|3217|18735|1616489|5889|28764|52|26|18|5|72|112|78|62.85|175.98|170.70|411.84|13314.60|4902.30|13726.44|197.05|3461.79|3568.50|9852.81|10049.86|13421.31|13618.36|4950.51|
+2451102|51665|2451181|11576|38145|706220|7095|3217|18735|1616489|5889|28764|49|28|5|3|98|112|69|29.32|77.40|71.98|373.98|4966.62|2023.08|5340.60|71.51|1390.65|2029.29|3575.97|3647.48|5605.26|5676.77|1552.89|
+2451102|51665|2451124|14108|38145|706220|7095|3217|18735|1616489|5889|28764|19|19|13|1|206|112|95|84.72|158.42|125.15|3160.65|11889.25|8048.40|15049.90|0.00|0.00|2558.35|11889.25|11889.25|14447.60|14447.60|3840.85|
+2451102|51665|2451151|9788|38145|706220|7095|3217|18735|1616489|5889|28764|7|22|3|1|209|112|8|9.21|19.06|9.72|74.72|77.76|73.68|152.48|1.64|36.54|60.96|41.22|42.86|102.18|103.82|-32.46|
+2451102|51665|2451130|578|38145|706220|7095|3217|18735|1616489|5889|28764|31|1|19|2|32|112|2|62.33|152.08|48.66|206.84|97.32|124.66|304.16|3.40|29.19|48.66|68.13|71.53|116.79|120.19|-56.53|
+2451102|51665|2451189|721|38145|706220|7095|3217|18735|1616489|5889|28764|7|19|6|2|88|112|34|7.80|23.40|14.27|310.42|485.18|265.20|795.60|43.66|0.00|111.18|485.18|528.84|596.36|640.02|219.98|
+2451102|51665|2451219|772|38145|706220|7095|3217|18735|1616489|5889|28764|4|8|7|4|94|112|54|18.72|25.64|5.38|1094.04|290.52|1010.88|1384.56|2.90|0.00|262.98|290.52|293.42|553.50|556.40|-720.36|
+2451102|51665|2451159|2521|38145|706220|7095|3217|18735|1616489|5889|28764|19|16|18|2|66|112|3|97.95|225.28|148.68|229.80|446.04|293.85|675.84|19.00|129.35|195.99|316.69|335.69|512.68|531.68|22.84|
+2451102|51665|2451174|11065|38145|706220|7095|3217|18735|1616489|5889|28764|31|10|6|1|36|112|21|19.69|27.76|26.92|17.64|565.32|413.49|582.96|11.30|0.00|5.67|565.32|576.62|570.99|582.29|151.83|
+2451878|54989|2451916|17690|11575|599644|2113|44229|81040|173113|2036|26552|23|14|4|5|93|113|30|85.87|211.24|173.21|1140.90|5196.30|2576.10|6337.20|311.77|0.00|63.30|5196.30|5508.07|5259.60|5571.37|2620.20|
+2451878|54989|2451933|6803|11575|599644|2113|44229|81040|173113|2036|26552|29|20|7|3|299|113|29|57.63|154.44|46.33|3135.19|1343.57|1671.27|4478.76|0.00|0.00|1074.74|1343.57|1343.57|2418.31|2418.31|-327.70|
+2451878|54989|2451928|2059|11575|599644|2113|44229|81040|173113|2036|26552|38|25|3|3|223|113|54|4.79|10.15|9.54|32.94|515.16|258.66|548.10|1.13|401.82|180.36|113.34|114.47|293.70|294.83|-145.32|
+2451878|54989|2451955|2675|11575|599644|2113|44229|81040|173113|2036|26552|49|23|12|2|169|113|6|77.77|174.98|69.99|629.94|419.94|466.62|1049.88|0.25|394.74|0.00|25.20|25.45|25.20|25.45|-441.42|
+2451878|54989|2451945|3013|11575|599644|2113|44229|81040|173113|2036|26552|49|13|2|4|59|113|27|96.35|188.84|173.73|407.97|4690.71|2601.45|5098.68|281.44|0.00|1835.46|4690.71|4972.15|6526.17|6807.61|2089.26|
+2451878|54989|2451952|17425|11575|599644|2113|44229|81040|173113|2036|26552|49|5|11|1|94|113|7|17.75|44.90|26.94|125.72|188.58|124.25|314.30|11.31|0.00|91.14|188.58|199.89|279.72|291.03|64.33|
+2451878|54989|2451996|12950|11575|599644|2113|44229|81040|173113|2036|26552|14|11|14|5|203|113|75|10.67|16.53|10.57|447.00|792.75|800.25|1239.75|63.42|0.00|446.25|792.75|856.17|1239.00|1302.42|-7.50|
+2451878|54989|2451950|455|11575|599644|2113|44229|81040|173113|2036|26552|43|14|4|5|296|113|51|96.52|126.44|63.22|3224.22|3224.22|4922.52|6448.44|128.96|0.00|386.58|3224.22|3353.18|3610.80|3739.76|-1698.30|
+2451878|54989|2451965|4298|11575|599644|2113|44229|81040|173113|2036|26552|5|1|6|1|279|113|68|3.77|6.10|5.73|25.16|389.64|256.36|414.80|7.79|0.00|207.40|389.64|397.43|597.04|604.83|133.28|
+2451878|54989|2451915|7639|11575|599644|2113|44229|81040|173113|2036|26552|29|25|1|1|14|113|54|10.56|30.30|5.75|1325.70|310.50|570.24|1636.20|1.86|124.20|408.78|186.30|188.16|595.08|596.94|-383.94|
+2451878|54989|2451959|3359|11575|599644|2113|44229|81040|173113|2036|26552|44|2|14|3|232|113|51|73.77|178.52|7.14|8740.38|364.14|3762.27|9104.52|14.56|0.00|4278.90|364.14|378.70|4643.04|4657.60|-3398.13|
+2451878|54989|2451922|14084|11575|599644|2113|44229|81040|173113|2036|26552|26|14|12|1|290|113|38|71.70|204.34|183.90|776.72|6988.20|2724.60|7764.92|209.64|0.00|2795.28|6988.20|7197.84|9783.48|9993.12|4263.60|
+2451878|54989|2451937|16837|11575|599644|2113|44229|81040|173113|2036|26552|35|13|6|3|237|113|38|26.48|72.55|68.92|137.94|2618.96|1006.24|2756.90|0.00|497.60|358.34|2121.36|2121.36|2479.70|2479.70|1115.12|
+2451878|54989|2451901|10831|11575|599644|2113|44229|81040|173113|2036|26552|17|23|5|3|111|113|81|59.30|116.82|68.92|3879.90|5582.52|4803.30|9462.42|502.42|0.00|283.50|5582.52|6084.94|5866.02|6368.44|779.22|
+2452221|67128|2452253|10055|81152|1540093|1787|13481|21177|206842|2257|12351|29|3|2|4|16|114|64|58.78|88.75|6.21|5282.56|397.44|3761.92|5680.00|15.89|0.00|1817.60|397.44|413.33|2215.04|2230.93|-3364.48|
+2452221|67128|2452339|5223|81152|1540093|1787|13481|21177|206842|2257|12351|5|1|12|5|131|114|39|88.29|183.64|167.11|644.67|6517.29|3443.31|7161.96|586.55|0.00|644.28|6517.29|7103.84|7161.57|7748.12|3073.98|
+2452221|67128|2452313|14329|81152|1540093|1787|13481|21177|206842|2257|12351|25|23|1|2|16|114|21|97.11|247.63|240.20|156.03|5044.20|2039.31|5200.23|252.21|0.00|1715.91|5044.20|5296.41|6760.11|7012.32|3004.89|
+2452221|67128|2452307|223|81152|1540093|1787|13481|21177|206842|2257|12351|49|23|9|4|266|114|42|82.60|246.97|22.22|9439.50|933.24|3469.20|10372.74|1.95|867.91|3215.52|65.33|67.28|3280.85|3282.80|-3403.87|
+2452221|67128|2452243|12413|81152|1540093|1787|13481|21177|206842|2257|12351|21|5|5|2|2|114|35|67.82|185.82|65.03|4227.65|2276.05|2373.70|6503.70|182.08|0.00|780.15|2276.05|2458.13|3056.20|3238.28|-97.65|
+2452221|67128|2452230|11599|81152|1540093|1787|13481|21177|206842|2257|12351|5|19|7|4|61|114|67|35.04|46.60|23.30|1561.10|1561.10|2347.68|3122.20|15.61|0.00|936.66|1561.10|1576.71|2497.76|2513.37|-786.58|
+2452221|67128|2452297|8017|81152|1540093|1787|13481|21177|206842|2257|12351|49|25|11|4|120|114|61|22.02|42.05|9.67|1975.18|589.87|1343.22|2565.05|29.49|0.00|307.44|589.87|619.36|897.31|926.80|-753.35|
+2452221|67128|2452271|2833|81152|1540093|1787|13481|21177|206842|2257|12351|33|13|17|2|157|114|23|69.64|114.20|91.36|525.32|2101.28|1601.72|2626.60|105.06|0.00|603.98|2101.28|2206.34|2705.26|2810.32|499.56|
+2452221|67128|2452275|11175|81152|1540093|1787|13481|21177|206842|2257|12351|3|1|13|2|164|114|34|83.22|182.25|134.86|1611.26|4585.24|2829.48|6196.50|0.00|0.00|1053.32|4585.24|4585.24|5638.56|5638.56|1755.76|
+2452221|67128|2452341|611|81152|1540093|1787|13481|21177|206842|2257|12351|57|19|4|5|6|114|5|53.98|121.99|50.01|359.90|250.05|269.90|609.95|22.50|0.00|134.15|250.05|272.55|384.20|406.70|-19.85|
+2452221|67128|2452226|6647|81152|1540093|1787|13481|21177|206842|2257|12351|11|13|5|1|77|114|12|69.88|79.66|43.81|430.20|525.72|838.56|955.92|42.05|0.00|248.52|525.72|567.77|774.24|816.29|-312.84|
+2452221|67128|2452234|5085|81152|1540093|1787|13481|21177|206842|2257|12351|25|13|1|5|199|114|49|54.72|154.31|138.87|756.56|6804.63|2681.28|7561.19|476.32|0.00|3478.02|6804.63|7280.95|10282.65|10758.97|4123.35|
+2452221|67128|2452228|14537|81152|1540093|1787|13481|21177|206842|2257|12351|43|3|9|4|136|114|63|73.81|200.76|68.25|8348.13|4299.75|4650.03|12647.88|343.98|0.00|3667.86|4299.75|4643.73|7967.61|8311.59|-350.28|
+2452221|67128|2452310|11373|81152|1540093|1787|13481|21177|206842|2257|12351|39|11|5|1|160|114|80|6.68|7.94|6.74|96.00|539.20|534.40|635.20|43.13|0.00|113.60|539.20|582.33|652.80|695.93|4.80|
+2452221|67128|2452233|9291|81152|1540093|1787|13481|21177|206842|2257|12351|27|15|12|4|283|114|25|31.10|53.49|14.44|976.25|361.00|777.50|1337.25|0.00|0.00|173.75|361.00|361.00|534.75|534.75|-416.50|
+2452221|67128|2452297|10619|81152|1540093|1787|13481|21177|206842|2257|12351|29|9|8|3|238|114|33|5.29|10.47|5.65|159.06|186.45|174.57|345.51|9.24|70.85|124.08|115.60|124.84|239.68|248.92|-58.97|
+2451151|15600|2451221|11689|6582|1340820|5458|13649|79843|1871584|3563|34601|1|7|7|2|50|115|37|96.20|169.31|145.60|877.27|5387.20|3559.40|6264.47|215.48|0.00|62.53|5387.20|5602.68|5449.73|5665.21|1827.80|
+2451151|15600|2451253|15208|6582|1340820|5458|13649|79843|1871584|3563|34601|31|28|4|5|289|115|92|71.31|110.53|51.94|5390.28|4778.48|6560.52|10168.76|382.27|0.00|406.64|4778.48|5160.75|5185.12|5567.39|-1782.04|
+2451151|15600|2451257|4948|6582|1340820|5458|13649|79843|1871584|3563|34601|1|16|20|3|49|115|52|59.19|102.99|4.11|5141.76|213.72|3077.88|5355.48|6.41|0.00|2249.00|213.72|220.13|2462.72|2469.13|-2864.16|
+2451151|15600|2451257|13024|6582|1340820|5458|13649|79843|1871584|3563|34601|50|10|6|1|276|115|49|55.13|135.06|90.49|2183.93|4434.01|2701.37|6617.94|177.36|0.00|727.65|4434.01|4611.37|5161.66|5339.02|1732.64|
+2451151|15600|2451181|13054|6582|1340820|5458|13649|79843|1871584|3563|34601|26|20|8|2|159|115|6|49.20|126.44|69.54|341.40|417.24|295.20|758.64|25.03|0.00|22.74|417.24|442.27|439.98|465.01|122.04|
+2451151|15600|2451270|1378|6582|1340820|5458|13649|79843|1871584|3563|34601|10|28|14|3|282|115|15|8.04|18.09|14.83|48.90|222.45|120.60|271.35|4.87|60.06|40.65|162.39|167.26|203.04|207.91|41.79|
+2451151|15600|2451211|10928|6582|1340820|5458|13649|79843|1871584|3563|34601|40|25|8|3|5|115|49|74.34|155.37|138.27|837.90|6775.23|3642.66|7613.13|406.51|0.00|3121.30|6775.23|7181.74|9896.53|10303.04|3132.57|
+2451151|15600|2451262|1732|6582|1340820|5458|13649|79843|1871584|3563|34601|16|14|14|3|12|115|51|25.73|72.30|29.64|2175.66|1511.64|1312.23|3687.30|75.58|0.00|1732.98|1511.64|1587.22|3244.62|3320.20|199.41|
+2451151|15600|2451162|17470|6582|1340820|5458|13649|79843|1871584|3563|34601|46|26|4|4|104|115|9|43.24|79.12|46.68|291.96|420.12|389.16|712.08|4.20|0.00|291.87|420.12|424.32|711.99|716.19|30.96|
+2451151|15600|2451209|658|6582|1340820|5458|13649|79843|1871584|3563|34601|28|1|17|3|109|115|21|48.83|105.96|90.06|333.90|1891.26|1025.43|2225.16|0.00|0.00|600.60|1891.26|1891.26|2491.86|2491.86|865.83|
+2451151|15600|2451235|13201|6582|1340820|5458|13649|79843|1871584|3563|34601|58|26|18|2|176|115|77|84.71|223.63|169.95|4133.36|13086.15|6522.67|17219.51|467.17|6412.21|8092.70|6673.94|7141.11|14766.64|15233.81|151.27|
+2451151|15600|2451175|14191|6582|1340820|5458|13649|79843|1871584|3563|34601|1|19|19|5|44|115|53|16.50|37.95|7.96|1589.47|421.88|874.50|2011.35|0.00|206.72|160.59|215.16|215.16|375.75|375.75|-659.34|
+2451151|15600|2451186|11839|6582|1340820|5458|13649|79843|1871584|3563|34601|43|20|10|4|188|115|74|95.25|222.88|173.84|3628.96|12864.16|7048.50|16493.12|257.28|0.00|5442.70|12864.16|13121.44|18306.86|18564.14|5815.66|
+2451151|15600|2451163|13180|6582|1340820|5458|13649|79843|1871584|3563|34601|52|14|2|3|177|115|31|96.58|114.93|108.03|213.90|3348.93|2993.98|3562.83|66.97|0.00|463.14|3348.93|3415.90|3812.07|3879.04|354.95|
+2451151|15600|2451193|1960|6582|1340820|5458|13649|79843|1871584|3563|34601|34|19|9|3|67|115|9|5.05|12.97|10.50|22.23|94.50|45.45|116.73|7.56|0.00|17.46|94.50|102.06|111.96|119.52|49.05|
+2451151|15600|2451262|2864|6582|1340820|5458|13649|79843|1871584|3563|34601|13|1|5|4|6|115|81|79.30|184.76|92.38|7482.78|7482.78|6423.30|14965.56|673.45|0.00|3591.54|7482.78|8156.23|11074.32|11747.77|1059.48|
+2451547|19764|2451663|8971|83106|946511|7069|19627|71775|1898902|6548|40753|44|1|19|4|182|116|77|18.20|38.58|0.77|2911.37|59.29|1401.40|2970.66|1.54|42.09|890.89|17.20|18.74|908.09|909.63|-1384.20|
+2451547|19764|2451636|15752|83106|946511|7069|19627|71775|1898902|6548|40753|53|13|20|3|23|116|5|94.88|268.51|220.17|241.70|1100.85|474.40|1342.55|36.54|187.14|416.15|913.71|950.25|1329.86|1366.40|439.31|
+2451547|19764|2451566|1777|83106|946511|7069|19627|71775|1898902|6548|40753|35|1|11|1|114|116|92|85.47|113.67|0.00|10457.64|0.00|7863.24|10457.64|0.00|0.00|1777.44|0.00|0.00|1777.44|1777.44|-7863.24|
+2451547|19764|2451664|65|83106|946511|7069|19627|71775|1898902|6548|40753|25|1|19|2|205|116|4|13.74|32.15|18.96|52.76|75.84|54.96|128.60|3.03|0.00|52.72|75.84|78.87|128.56|131.59|20.88|
+2451547|19764|2451638|16640|83106|946511|7069|19627|71775|1898902|6548|40753|29|29|5|5|172|116|67|10.23|24.04|5.28|1256.92|353.76|685.41|1610.68|0.00|0.00|353.76|353.76|353.76|707.52|707.52|-331.65|
+2451547|19764|2451631|2282|83106|946511|7069|19627|71775|1898902|6548|40753|44|29|4|1|215|116|59|46.24|79.53|73.96|328.63|4363.64|2728.16|4692.27|174.54|0.00|421.85|4363.64|4538.18|4785.49|4960.03|1635.48|
+2451547|19764|2451555|12542|83106|946511|7069|19627|71775|1898902|6548|40753|20|23|13|4|135|116|91|70.09|84.80|13.56|6482.84|1233.96|6378.19|7716.80|111.05|0.00|3472.56|1233.96|1345.01|4706.52|4817.57|-5144.23|
+2451547|19764|2451569|769|83106|946511|7069|19627|71775|1898902|6548|40753|11|29|11|5|289|116|81|25.55|73.07|27.76|3670.11|2248.56|2069.55|5918.67|202.37|0.00|2722.41|2248.56|2450.93|4970.97|5173.34|179.01|
+2451547|19764|2451582|662|83106|946511|7069|19627|71775|1898902|6548|40753|32|5|12|3|217|116|17|71.80|142.16|98.09|749.19|1667.53|1220.60|2416.72|66.70|0.00|990.76|1667.53|1734.23|2658.29|2724.99|446.93|
+2451547|19764|2451574|2222|83106|946511|7069|19627|71775|1898902|6548|40753|23|14|17|3|222|116|5|22.73|28.41|5.39|115.10|26.95|113.65|142.05|0.00|26.95|55.35|0.00|0.00|55.35|55.35|-113.65|
+2451547|19764|2451622|10193|83106|946511|7069|19627|71775|1898902|6548|40753|7|2|10|1|19|116|68|58.31|72.88|40.81|2180.76|2775.08|3965.08|4955.84|23.31|2442.07|2477.92|333.01|356.32|2810.93|2834.24|-3632.07|
+2451547|19764|2451632|3349|83106|946511|7069|19627|71775|1898902|6548|40753|56|13|2|5|218|116|11|67.39|139.49|37.66|1120.13|414.26|741.29|1534.39|4.14|0.00|751.85|414.26|418.40|1166.11|1170.25|-327.03|
+2451547|19764|2451567|8795|83106|946511|7069|19627|71775|1898902|6548|40753|35|11|11|4|134|116|10|91.13|268.83|29.57|2392.60|295.70|911.30|2688.30|2.95|0.00|860.20|295.70|298.65|1155.90|1158.85|-615.60|
+2451547|19764|2451564|6914|83106|946511|7069|19627|71775|1898902|6548|40753|19|29|7|4|261|116|92|10.89|11.21|8.74|227.24|804.08|1001.88|1031.32|24.12|0.00|370.76|804.08|828.20|1174.84|1198.96|-197.80|
+2451547|19764|2451554|16121|83106|946511|7069|19627|71775|1898902|6548|40753|29|14|14|2|93|116|54|5.98|6.21|4.16|110.70|224.64|322.92|335.34|0.00|0.00|56.70|224.64|224.64|281.34|281.34|-98.28|
+2451317|66696|2451406|2797|61015|1648533|2898|35663|37271|1401607|1064|29429|22|2|2|2|242|117|75|86.29|123.39|49.35|5553.00|3701.25|6471.75|9254.25|333.11|0.00|3423.75|3701.25|4034.36|7125.00|7458.11|-2770.50|
+2451317|66696|2451347|14938|61015|1648533|2898|35663|37271|1401607|1064|29429|34|22|9|1|160|117|40|8.69|16.07|9.32|270.00|372.80|347.60|642.80|14.91|0.00|314.80|372.80|387.71|687.60|702.51|25.20|
+2451317|66696|2451337|4669|61015|1648533|2898|35663|37271|1401607|1064|29429|1|19|12|4|143|117|67|93.18|127.65|114.88|855.59|7696.96|6243.06|8552.55|615.75|0.00|1368.14|7696.96|8312.71|9065.10|9680.85|1453.90|
+2451317|66696|2451322|14833|61015|1648533|2898|35663|37271|1401607|1064|29429|31|28|14|3|162|117|78|92.26|261.09|46.99|16699.80|3665.22|7196.28|20365.02|293.21|0.00|4683.90|3665.22|3958.43|8349.12|8642.33|-3531.06|
+2451317|66696|2451378|17842|61015|1648533|2898|35663|37271|1401607|1064|29429|16|8|4|5|219|117|45|56.17|106.72|26.68|3601.80|1200.60|2527.65|4802.40|5.88|612.30|1776.60|588.30|594.18|2364.90|2370.78|-1939.35|
+2451317|66696|2451418|6958|61015|1648533|2898|35663|37271|1401607|1064|29429|2|28|1|2|280|117|20|8.49|21.47|12.88|171.80|257.60|169.80|429.40|23.18|0.00|128.80|257.60|280.78|386.40|409.58|87.80|
+2451317|66696|2451386|11869|61015|1648533|2898|35663|37271|1401607|1064|29429|8|28|15|5|255|117|34|54.02|64.28|16.71|1617.38|568.14|1836.68|2185.52|22.72|0.00|393.38|568.14|590.86|961.52|984.24|-1268.54|
+2451317|66696|2451395|7513|61015|1648533|2898|35663|37271|1401607|1064|29429|1|28|19|3|147|117|60|30.03|34.83|31.34|209.40|1880.40|1801.80|2089.80|75.21|0.00|647.40|1880.40|1955.61|2527.80|2603.01|78.60|
+2451317|66696|2451337|9926|61015|1648533|2898|35663|37271|1401607|1064|29429|31|26|2|5|98|117|64|90.00|233.10|9.32|14321.92|596.48|5760.00|14918.40|0.00|0.00|894.72|596.48|596.48|1491.20|1491.20|-5163.52|
+2451317|66696|2451434|8744|61015|1648533|2898|35663|37271|1401607|1064|29429|32|1|18|2|96|117|91|79.73|207.29|93.28|10374.91|8488.48|7255.43|18863.39|169.76|0.00|754.39|8488.48|8658.24|9242.87|9412.63|1233.05|
+2451317|66696|2451425|6530|61015|1648533|2898|35663|37271|1401607|1064|29429|40|4|2|1|180|117|35|19.83|33.90|29.49|154.35|1032.15|694.05|1186.50|0.00|0.00|308.35|1032.15|1032.15|1340.50|1340.50|338.10|
+2451317|66696|2451366|17455|61015|1648533|2898|35663|37271|1401607|1064|29429|31|4|20|2|121|117|39|33.25|81.13|18.65|2436.72|727.35|1296.75|3164.07|35.13|225.47|1265.55|501.88|537.01|1767.43|1802.56|-794.87|
+2451317|66696|2451409|793|61015|1648533|2898|35663|37271|1401607|1064|29429|49|19|5|5|235|117|60|27.17|69.01|11.04|3478.20|662.40|1630.20|4140.60|59.61|0.00|207.00|662.40|722.01|869.40|929.01|-967.80|
+2451317|66696|2451358|362|61015|1648533|2898|35663|37271|1401607|1064|29429|28|8|15|2|162|117|51|91.92|204.98|135.28|3554.70|6899.28|4687.92|10453.98|68.99|0.00|1358.64|6899.28|6968.27|8257.92|8326.91|2211.36|
+2451870|79151|2451946|9835|4226|1085607|1388|46322|75166|750026|1665|30646|35|26|17|3|46|118|14|11.98|23.48|4.69|263.06|65.66|167.72|328.72|1.31|0.00|52.50|65.66|66.97|118.16|119.47|-102.06|
+2451870|79151|2451929|16157|4226|1085607|1388|46322|75166|750026|1665|30646|35|7|10|1|292|118|6|79.47|185.95|130.16|334.74|780.96|476.82|1115.70|23.42|0.00|301.20|780.96|804.38|1082.16|1105.58|304.14|
+2451870|79151|2451953|8821|4226|1085607|1388|46322|75166|750026|1665|30646|55|25|5|5|192|118|15|71.71|206.52|99.12|1611.00|1486.80|1075.65|3097.80|3.12|1442.19|619.50|44.61|47.73|664.11|667.23|-1031.04|
+2451870|79151|2451923|6686|4226|1085607|1388|46322|75166|750026|1665|30646|38|19|20|5|220|118|100|97.11|200.04|120.02|8002.00|12002.00|9711.00|20004.00|600.10|0.00|800.00|12002.00|12602.10|12802.00|13402.10|2291.00|
+2451870|79151|2451875|98|4226|1085607|1388|46322|75166|750026|1665|30646|20|13|16|2|168|118|8|11.00|15.07|6.32|70.00|50.56|88.00|120.56|3.53|0.00|1.20|50.56|54.09|51.76|55.29|-37.44|
+2451870|79151|2451934|38|4226|1085607|1388|46322|75166|750026|1665|30646|29|8|5|5|282|118|43|91.73|185.29|131.55|2310.82|5656.65|3944.39|7967.47|0.00|0.00|796.36|5656.65|5656.65|6453.01|6453.01|1712.26|
+2451870|79151|2451904|1871|4226|1085607|1388|46322|75166|750026|1665|30646|41|1|1|1|248|118|54|38.71|43.35|22.10|1147.50|1193.40|2090.34|2340.90|83.53|0.00|351.00|1193.40|1276.93|1544.40|1627.93|-896.94|
+2451870|79151|2451938|2875|4226|1085607|1388|46322|75166|750026|1665|30646|56|8|8|4|210|118|96|96.34|147.40|48.64|9480.96|4669.44|9248.64|14150.40|140.08|0.00|3395.52|4669.44|4809.52|8064.96|8205.04|-4579.20|
+2451870|79151|2451989|10195|4226|1085607|1388|46322|75166|750026|1665|30646|59|25|3|1|89|118|87|8.21|12.47|1.12|987.45|97.44|714.27|1084.89|6.82|0.00|466.32|97.44|104.26|563.76|570.58|-616.83|
+2451870|79151|2451964|8318|4226|1085607|1388|46322|75166|750026|1665|30646|32|7|11|3|235|118|46|63.99|181.09|52.51|5914.68|2415.46|2943.54|8330.14|24.15|0.00|2915.48|2415.46|2439.61|5330.94|5355.09|-528.08|
+2452461|65711|2452466|15306|67606|714813|6373|5933|51134|21027|7013|12173|60|18|11|5|262|119|12|58.95|176.85|56.59|1443.12|679.08|707.40|2122.20|9.37|522.89|148.44|156.19|165.56|304.63|314.00|-551.21|
+2452461|65711|2452553|4584|67606|714813|6373|5933|51134|21027|7013|12173|9|21|14|2|110|119|15|15.18|44.32|41.66|39.90|624.90|227.70|664.80|43.74|0.00|272.55|624.90|668.64|897.45|941.19|397.20|
+2452461|65711|2452515|1077|67606|714813|6373|5933|51134|21027|7013|12173|18|27|19|2|22|119|10|44.33|121.02|67.77|532.50|677.70|443.30|1210.20|60.99|0.00|254.10|677.70|738.69|931.80|992.79|234.40|
+2452461|65711|2452499|15042|67606|714813|6373|5933|51134|21027|7013|12173|13|15|6|4|235|119|92|44.23|122.95|73.77|4524.56|6786.84|4069.16|11311.40|407.21|0.00|4524.56|6786.84|7194.05|11311.40|11718.61|2717.68|
+2452461|65711|2452579|11179|67606|714813|6373|5933|51134|21027|7013|12173|6|30|4|1|245|119|63|8.67|13.61|1.90|737.73|119.70|546.21|857.43|3.59|0.00|274.05|119.70|123.29|393.75|397.34|-426.51|
+2452461|65711|2452538|348|67606|714813|6373|5933|51134|21027|7013|12173|43|27|19|1|6|119|14|27.17|51.35|15.91|496.16|222.74|380.38|718.90|0.00|0.00|323.40|222.74|222.74|546.14|546.14|-157.64|
+2452461|65711|2452465|14103|67606|714813|6373|5933|51134|21027|7013|12173|6|9|7|4|184|119|91|66.58|120.50|12.05|9868.95|1096.55|6058.78|10965.50|10.74|943.03|2082.99|153.52|164.26|2236.51|2247.25|-5905.26|
+2452461|65711|2452580|3006|67606|714813|6373|5933|51134|21027|7013|12173|36|7|10|4|165|119|72|7.92|11.88|0.83|795.60|59.76|570.24|855.36|0.59|0.00|0.00|59.76|60.35|59.76|60.35|-510.48|
+2452461|65711|2452465|12469|67606|714813|6373|5933|51134|21027|7013|12173|54|9|10|5|110|119|48|87.82|99.23|38.69|2905.92|1857.12|4215.36|4763.04|148.56|0.00|142.56|1857.12|2005.68|1999.68|2148.24|-2358.24|
+2452461|65711|2452515|8575|67606|714813|6373|5933|51134|21027|7013|12173|25|9|17|2|214|119|37|95.91|117.01|94.77|822.88|3506.49|3548.67|4329.37|13.32|2174.02|2164.50|1332.47|1345.79|3496.97|3510.29|-2216.20|
+2452461|65711|2452488|3564|67606|714813|6373|5933|51134|21027|7013|12173|43|1|13|2|231|119|76|71.78|203.13|154.37|3705.76|11732.12|5455.28|15437.88|469.28|0.00|4630.68|11732.12|12201.40|16362.80|16832.08|6276.84|
+2452461|65711|2452525|9228|67606|714813|6373|5933|51134|21027|7013|12173|51|1|16|4|219|119|37|70.41|207.00|82.80|4595.40|3063.60|2605.17|7659.00|183.81|0.00|995.67|3063.60|3247.41|4059.27|4243.08|458.43|
+2452461|65711|2452575|16519|67606|714813|6373|5933|51134|21027|7013|12173|19|13|17|4|21|119|63|97.04|284.32|51.17|14688.45|3223.71|6113.52|17912.16|0.00|0.00|357.84|3223.71|3223.71|3581.55|3581.55|-2889.81|
+2451979|81904|2451991|12061|50537|854361|3432|42903|18273|1387425|516|43940|25|5|16|4|19|120|52|17.72|49.43|48.44|51.48|2518.88|921.44|2570.36|125.94|0.00|76.96|2518.88|2644.82|2595.84|2721.78|1597.44|
+2451979|81904|2452055|9157|50537|854361|3432|42903|18273|1387425|516|43940|29|9|15|3|118|120|74|85.41|229.75|165.42|4760.42|12241.08|6320.34|17001.50|734.46|0.00|8330.18|12241.08|12975.54|20571.26|21305.72|5920.74|
+2451979|81904|2452056|14811|50537|854361|3432|42903|18273|1387425|516|43940|3|1|3|2|104|120|86|27.37|37.77|27.94|845.38|2402.84|2353.82|3248.22|56.22|1778.10|324.22|624.74|680.96|948.96|1005.18|-1729.08|
+2451979|81904|2451990|8697|50537|854361|3432|42903|18273|1387425|516|43940|37|3|15|5|138|120|48|17.33|24.43|0.48|1149.60|23.04|831.84|1172.64|0.69|0.00|363.36|23.04|23.73|386.40|387.09|-808.80|
+2451979|81904|2452090|15917|50537|854361|3432|42903|18273|1387425|516|43940|23|5|11|2|73|120|52|16.87|44.19|29.60|758.68|1539.20|877.24|2297.88|0.00|0.00|459.16|1539.20|1539.20|1998.36|1998.36|661.96|
+2451979|81904|2452042|14787|50537|854361|3432|42903|18273|1387425|516|43940|37|7|2|2|12|120|98|66.81|89.52|14.32|7369.60|1403.36|6547.38|8772.96|14.03|0.00|2455.88|1403.36|1417.39|3859.24|3873.27|-5144.02|
+2451979|81904|2451989|4625|50537|854361|3432|42903|18273|1387425|516|43940|9|19|12|2|62|120|60|77.03|221.84|37.71|11047.80|2262.60|4621.80|13310.40|22.62|0.00|532.20|2262.60|2285.22|2794.80|2817.42|-2359.20|
+2451979|81904|2452090|4381|50537|854361|3432|42903|18273|1387425|516|43940|47|7|4|4|117|120|94|49.72|112.86|51.91|5729.30|4879.54|4673.68|10608.84|48.79|0.00|5198.20|4879.54|4928.33|10077.74|10126.53|205.86|
+2452131|5091|2452235|7107|84173|884481|5589|23897|73770|532206|2821|43529|53|9|13|1|89|121|14|67.59|181.81|125.44|789.18|1756.16|946.26|2545.34|17.56|0.00|1196.30|1756.16|1773.72|2952.46|2970.02|809.90|
+2452131|5091|2452145|9677|84173|884481|5589|23897|73770|532206|2821|43529|35|21|4|2|290|121|39|81.77|191.34|40.18|5895.24|1567.02|3189.03|7462.26|62.68|0.00|596.70|1567.02|1629.70|2163.72|2226.40|-1622.01|
+2452131|5091|2452140|11847|84173|884481|5589|23897|73770|532206|2821|43529|53|15|9|2|138|121|68|45.75|98.36|75.73|1538.84|5149.64|3111.00|6688.48|0.00|3038.28|1136.96|2111.36|2111.36|3248.32|3248.32|-999.64|
+2452131|5091|2452229|10585|84173|884481|5589|23897|73770|532206|2821|43529|37|25|10|2|274|121|93|68.38|136.07|16.32|11136.75|1517.76|6359.34|12654.51|121.42|0.00|3036.45|1517.76|1639.18|4554.21|4675.63|-4841.58|
+2452131|5091|2452135|17351|84173|884481|5589|23897|73770|532206|2821|43529|59|11|19|2|195|121|4|72.91|95.51|42.02|213.96|168.08|291.64|382.04|6.21|43.70|118.40|124.38|130.59|242.78|248.99|-167.26|
+2452131|5091|2452133|11725|84173|884481|5589|23897|73770|532206|2821|43529|3|15|14|5|266|121|93|51.11|69.50|11.12|5429.34|1034.16|4753.23|6463.50|82.73|0.00|1034.16|1034.16|1116.89|2068.32|2151.05|-3719.07|
+2452131|5091|2452214|8505|84173|884481|5589|23897|73770|532206|2821|43529|5|25|3|3|103|121|91|73.32|122.44|52.64|6351.80|4790.24|6672.12|11142.04|383.21|0.00|1001.91|4790.24|5173.45|5792.15|6175.36|-1881.88|
+2452131|5091|2452154|10025|84173|884481|5589|23897|73770|532206|2821|43529|51|5|13|5|107|121|87|96.30|269.64|115.94|13371.90|10086.78|8378.10|23458.68|504.33|0.00|5394.87|10086.78|10591.11|15481.65|15985.98|1708.68|
+2452131|5091|2452182|601|84173|884481|5589|23897|73770|532206|2821|43529|11|15|13|5|67|121|83|48.93|145.32|14.53|10855.57|1205.99|4061.19|12061.56|48.23|0.00|2532.33|1205.99|1254.22|3738.32|3786.55|-2855.20|
+2451053|33607|2451171|2194|39352|1220992|5385|20625|6826|981636|4298|39891|32|4|9|3|224|122|28|2.57|4.00|3.36|17.92|94.08|71.96|112.00|2.70|60.21|41.44|33.87|36.57|75.31|78.01|-38.09|
+2451053|33607|2451054|12055|39352|1220992|5385|20625|6826|981636|4298|39891|8|20|10|4|98|122|43|14.31|41.06|18.06|989.00|776.58|615.33|1765.58|7.76|0.00|794.21|776.58|784.34|1570.79|1578.55|161.25|
+2451053|33607|2451125|14737|39352|1220992|5385|20625|6826|981636|4298|39891|50|2|18|1|207|122|80|34.63|48.48|12.60|2870.40|1008.00|2770.40|3878.40|80.64|0.00|775.20|1008.00|1088.64|1783.20|1863.84|-1762.40|
+2451053|33607|2451117|8822|39352|1220992|5385|20625|6826|981636|4298|39891|44|22|11|5|182|122|9|6.26|16.90|14.19|24.39|127.71|56.34|152.10|2.55|0.00|59.31|127.71|130.26|187.02|189.57|71.37|
+2451053|33607|2451101|13363|39352|1220992|5385|20625|6826|981636|4298|39891|10|26|19|2|262|122|81|93.90|246.95|133.35|9201.60|10801.35|7605.90|20002.95|216.02|0.00|9200.79|10801.35|11017.37|20002.14|20218.16|3195.45|
+2451053|33607|2451085|10351|39352|1220992|5385|20625|6826|981636|4298|39891|28|20|17|3|59|122|59|20.05|32.48|19.81|747.53|1168.79|1182.95|1916.32|23.37|0.00|823.64|1168.79|1192.16|1992.43|2015.80|-14.16|
+2451053|33607|2451154|9733|39352|1220992|5385|20625|6826|981636|4298|39891|44|10|9|5|248|122|98|97.33|115.82|63.70|5107.76|6242.60|9538.34|11350.36|561.83|0.00|3290.84|6242.60|6804.43|9533.44|10095.27|-3295.74|
+2451053|33607|2451074|2584|39352|1220992|5385|20625|6826|981636|4298|39891|16|26|9|1|216|122|63|99.08|177.35|136.55|2570.40|8602.65|6242.04|11173.05|0.00|0.00|5586.21|8602.65|8602.65|14188.86|14188.86|2360.61|
+2451053|33607|2451133|302|39352|1220992|5385|20625|6826|981636|4298|39891|7|26|17|5|300|122|98|64.44|119.85|20.37|9749.04|1996.26|6315.12|11745.30|39.92|0.00|4932.34|1996.26|2036.18|6928.60|6968.52|-4318.86|
+2451053|33607|2451151|12247|39352|1220992|5385|20625|6826|981636|4298|39891|8|25|8|1|254|122|81|40.80|115.46|55.42|4863.24|4489.02|3304.80|9352.26|359.12|0.00|934.74|4489.02|4848.14|5423.76|5782.88|1184.22|
+2451053|33607|2451138|4738|39352|1220992|5385|20625|6826|981636|4298|39891|7|19|16|5|51|122|60|90.07|165.72|79.54|5170.80|4772.40|5404.20|9943.20|95.44|0.00|3976.80|4772.40|4867.84|8749.20|8844.64|-631.80|
+2451053|33607|2451082|13304|39352|1220992|5385|20625|6826|981636|4298|39891|22|2|2|2|32|122|82|13.66|19.39|13.37|493.64|1096.34|1120.12|1589.98|21.92|0.00|364.90|1096.34|1118.26|1461.24|1483.16|-23.78|
+2452429|31055|2452504|10407|41018|354220|6622|33024|85953|1616450|4230|39032|1|24|10|3|246|123|47|99.62|268.97|142.55|5941.74|6699.85|4682.14|12641.59|359.11|2210.95|505.25|4488.90|4848.01|4994.15|5353.26|-193.24|
+2452429|31055|2452499|15171|41018|354220|6622|33024|85953|1616450|4230|39032|42|9|19|3|77|123|56|98.90|148.35|5.93|7975.52|332.08|5538.40|8307.60|23.24|0.00|3821.44|332.08|355.32|4153.52|4176.76|-5206.32|
+2452429|31055|2452541|3642|41018|354220|6622|33024|85953|1616450|4230|39032|18|9|14|3|92|123|60|62.95|124.64|18.69|6357.00|1121.40|3777.00|7478.40|100.92|0.00|3514.80|1121.40|1222.32|4636.20|4737.12|-2655.60|
+2452429|31055|2452521|4827|41018|354220|6622|33024|85953|1616450|4230|39032|21|1|8|5|165|123|93|41.64|69.53|20.16|4591.41|1874.88|3872.52|6466.29|149.99|0.00|2974.14|1874.88|2024.87|4849.02|4999.01|-1997.64|
+2452429|31055|2452440|6867|41018|354220|6622|33024|85953|1616450|4230|39032|3|3|19|4|69|123|99|69.84|203.93|87.68|11508.75|8680.32|6914.16|20189.07|86.80|0.00|1412.73|8680.32|8767.12|10093.05|10179.85|1766.16|
+2452429|31055|2452467|783|41018|354220|6622|33024|85953|1616450|4230|39032|43|25|4|5|46|123|97|13.60|20.80|8.94|1150.42|867.18|1319.20|2017.60|16.12|598.35|241.53|268.83|284.95|510.36|526.48|-1050.37|
+2452429|31055|2452524|13969|41018|354220|6622|33024|85953|1616450|4230|39032|54|3|20|5|123|123|77|76.71|182.56|16.43|12792.01|1265.11|5906.67|14057.12|25.30|0.00|843.15|1265.11|1290.41|2108.26|2133.56|-4641.56|
+2452429|31055|2452474|3240|41018|354220|6622|33024|85953|1616450|4230|39032|51|13|5|1|17|123|86|57.01|151.64|54.59|8346.30|4694.74|4902.86|13041.04|93.89|0.00|129.86|4694.74|4788.63|4824.60|4918.49|-208.12|
+2452429|31055|2452483|16521|41018|354220|6622|33024|85953|1616450|4230|39032|43|7|13|5|294|123|40|82.86|216.26|179.49|1470.80|7179.60|3314.40|8650.40|287.18|0.00|432.40|7179.60|7466.78|7612.00|7899.18|3865.20|
+2452429|31055|2452468|3675|41018|354220|6622|33024|85953|1616450|4230|39032|19|9|20|5|267|123|100|31.86|95.26|35.24|6002.00|3524.00|3186.00|9526.00|140.96|0.00|1809.00|3524.00|3664.96|5333.00|5473.96|338.00|
+2452429|31055|2452514|8334|41018|354220|6622|33024|85953|1616450|4230|39032|49|19|5|5|295|123|98|92.39|215.26|210.95|422.38|20673.10|9054.22|21095.48|0.00|20673.10|6117.16|0.00|0.00|6117.16|6117.16|-9054.22|
+2452429|31055|2452545|16315|41018|354220|6622|33024|85953|1616450|4230|39032|33|12|8|5|199|123|35|61.86|153.41|128.86|859.25|4510.10|2165.10|5369.35|45.10|0.00|2577.05|4510.10|4555.20|7087.15|7132.25|2345.00|
+2452429|31055|2452537|17239|41018|354220|6622|33024|85953|1616450|4230|39032|42|25|5|2|169|123|78|33.87|95.85|12.46|6504.42|971.88|2641.86|7476.30|18.27|515.09|74.10|456.79|475.06|530.89|549.16|-2185.07|
+2452429|31055|2452495|14479|41018|354220|6622|33024|85953|1616450|4230|39032|12|12|6|1|66|123|72|68.35|114.14|52.50|4438.08|3780.00|4921.20|8218.08|37.80|0.00|3368.88|3780.00|3817.80|7148.88|7186.68|-1141.20|
+2452429|31055|2452536|10893|41018|354220|6622|33024|85953|1616450|4230|39032|30|12|14|5|23|123|26|27.92|34.06|31.33|70.98|814.58|725.92|885.56|1.95|716.83|0.00|97.75|99.70|97.75|99.70|-628.17|
+2451649|76443|2451717|8252|45329|493247|1612|24730|30670|1322511|6884|36049|14|20|17|5|43|124|81|96.71|107.34|50.44|4608.90|4085.64|7833.51|8694.54|81.71|0.00|2955.69|4085.64|4167.35|7041.33|7123.04|-3747.87|
+2451649|76443|2451734|15266|45329|493247|1612|24730|30670|1322511|6884|36049|14|19|15|2|159|124|78|81.73|141.39|128.66|992.94|10035.48|6374.94|11028.42|642.27|2007.09|4190.16|8028.39|8670.66|12218.55|12860.82|1653.45|
+2451649|76443|2451753|5378|45329|493247|1612|24730|30670|1322511|6884|36049|25|29|7|2|14|124|71|70.31|119.52|107.56|849.16|7636.76|4992.01|8485.92|534.57|0.00|3733.18|7636.76|8171.33|11369.94|11904.51|2644.75|
+2451649|76443|2451684|13799|45329|493247|1612|24730|30670|1322511|6884|36049|32|20|12|1|37|124|19|23.15|58.10|51.12|132.62|971.28|439.85|1103.90|19.42|0.00|253.84|971.28|990.70|1225.12|1244.54|531.43|
+2451649|76443|2451742|7633|45329|493247|1612|24730|30670|1322511|6884|36049|49|23|9|4|266|124|33|2.29|5.99|2.69|108.90|88.77|75.57|197.67|0.00|0.00|17.49|88.77|88.77|106.26|106.26|13.20|
+2451649|76443|2451769|7721|45329|493247|1612|24730|30670|1322511|6884|36049|53|8|10|3|216|124|35|48.70|85.22|27.27|2028.25|954.45|1704.50|2982.70|19.08|0.00|268.10|954.45|973.53|1222.55|1241.63|-750.05|
+2451649|76443|2451758|10441|45329|493247|1612|24730|30670|1322511|6884|36049|17|11|16|1|186|124|77|86.58|218.18|4.36|16464.14|335.72|6666.66|16799.86|23.50|0.00|4535.30|335.72|359.22|4871.02|4894.52|-6330.94|
+2451649|76443|2451761|2911|45329|493247|1612|24730|30670|1322511|6884|36049|20|29|19|1|170|124|75|93.94|143.72|102.04|3126.00|7653.00|7045.50|10779.00|0.00|0.00|1616.25|7653.00|7653.00|9269.25|9269.25|607.50|
+2451649|76443|2451721|9355|45329|493247|1612|24730|30670|1322511|6884|36049|55|7|10|1|135|124|72|70.21|186.75|69.09|8471.52|4974.48|5055.12|13446.00|0.00|1094.38|537.84|3880.10|3880.10|4417.94|4417.94|-1175.02|
+2452638|39420|2452684|5293|97631|1563969|3907|20292|66901|1715954|4106|5644|42|1|4|4|107|125|86|55.31|74.66|18.66|4816.00|1604.76|4756.66|6420.76|112.33|0.00|2696.10|1604.76|1717.09|4300.86|4413.19|-3151.90|
+2452638|39420|2452718|4293|97631|1563969|3907|20292|66901|1715954|4106|5644|51|7|2|5|208|125|4|48.69|124.64|89.74|139.60|358.96|194.76|498.56|32.30|0.00|94.72|358.96|391.26|453.68|485.98|164.20|
+2452638|39420|2452758|17040|97631|1563969|3907|20292|66901|1715954|4106|5644|36|15|15|5|232|125|95|89.50|114.56|27.49|8271.65|2611.55|8502.50|10883.20|0.00|0.00|5223.10|2611.55|2611.55|7834.65|7834.65|-5890.95|
+2452638|39420|2452663|15649|97631|1563969|3907|20292|66901|1715954|4106|5644|3|3|19|4|129|125|56|41.68|87.52|33.25|3039.12|1862.00|2334.08|4901.12|74.48|0.00|490.00|1862.00|1936.48|2352.00|2426.48|-472.08|
+2452638|39420|2452648|2641|97631|1563969|3907|20292|66901|1715954|4106|5644|19|1|2|3|117|125|79|23.65|68.58|21.94|3684.56|1733.26|1868.35|5417.82|17.33|0.00|2599.89|1733.26|1750.59|4333.15|4350.48|-135.09|
+2452638|39420|2452737|11760|97631|1563969|3907|20292|66901|1715954|4106|5644|39|13|11|3|35|125|87|25.59|69.60|47.32|1938.36|4116.84|2226.33|6055.20|188.96|2017.25|1089.24|2099.59|2288.55|3188.83|3377.79|-126.74|
+2452638|39420|2452644|6513|97631|1563969|3907|20292|66901|1715954|4106|5644|55|21|11|1|184|125|15|93.42|262.51|249.38|196.95|3740.70|1401.30|3937.65|261.84|0.00|1023.75|3740.70|4002.54|4764.45|5026.29|2339.40|
+2452638|39420|2452758|5023|97631|1563969|3907|20292|66901|1715954|4106|5644|18|12|3|5|87|125|54|11.48|18.82|17.31|81.54|934.74|619.92|1016.28|40.56|355.20|335.34|579.54|620.10|914.88|955.44|-40.38|
+2452638|39420|2452641|4057|97631|1563969|3907|20292|66901|1715954|4106|5644|48|1|11|1|188|125|17|7.63|9.53|4.28|89.25|72.76|129.71|162.01|2.35|46.56|45.22|26.20|28.55|71.42|73.77|-103.51|
+2452638|39420|2452644|14763|97631|1563969|3907|20292|66901|1715954|4106|5644|45|24|1|2|286|125|10|11.80|11.80|11.56|2.40|115.60|118.00|118.00|4.62|0.00|10.60|115.60|120.22|126.20|130.82|-2.40|
+2452638|39420|2452756|5511|97631|1563969|3907|20292|66901|1715954|4106|5644|13|25|9|5|31|125|59|46.03|63.98|47.98|944.00|2830.82|2715.77|3774.82|141.54|0.00|37.17|2830.82|2972.36|2867.99|3009.53|115.05|
+2451437|67067|2451521|4592|19818|827546|6474|9002|43306|10433|5739|44267|19|1|2|3|154|126|50|61.84|134.19|22.81|5569.00|1140.50|3092.00|6709.50|102.64|0.00|1341.50|1140.50|1243.14|2482.00|2584.64|-1951.50|
+2451437|67067|2451518|5323|19818|827546|6474|9002|43306|10433|5739|44267|25|16|5|4|110|126|45|13.51|21.75|12.61|411.30|567.45|607.95|978.75|16.00|300.74|136.80|266.71|282.71|403.51|419.51|-341.24|
+2451437|67067|2451507|5071|19818|827546|6474|9002|43306|10433|5739|44267|50|20|16|1|296|126|31|42.28|103.16|94.90|256.06|2941.90|1310.68|3197.96|147.09|0.00|383.47|2941.90|3088.99|3325.37|3472.46|1631.22|
+2451437|67067|2451530|7237|19818|827546|6474|9002|43306|10433|5739|44267|8|20|10|1|255|126|47|31.99|90.85|27.25|2989.20|1280.75|1503.53|4269.95|51.23|0.00|1536.90|1280.75|1331.98|2817.65|2868.88|-222.78|
+2451437|67067|2451485|208|19818|827546|6474|9002|43306|10433|5739|44267|19|16|8|5|89|126|81|82.85|190.55|60.97|10495.98|4938.57|6710.85|15434.55|24.19|4592.87|1543.05|345.70|369.89|1888.75|1912.94|-6365.15|
+2451437|67067|2451538|9520|19818|827546|6474|9002|43306|10433|5739|44267|55|8|12|4|146|126|47|89.21|161.47|37.13|5843.98|1745.11|4192.87|7589.09|17.45|0.00|3642.50|1745.11|1762.56|5387.61|5405.06|-2447.76|
+2451437|67067|2451497|5398|19818|827546|6474|9002|43306|10433|5739|44267|2|14|17|3|102|126|35|22.06|63.31|55.07|288.40|1927.45|772.10|2215.85|12.33|693.88|132.65|1233.57|1245.90|1366.22|1378.55|461.47|
+2451437|67067|2451496|4069|19818|827546|6474|9002|43306|10433|5739|44267|13|22|10|3|253|126|47|4.14|8.65|8.21|20.68|385.87|194.58|406.55|15.43|0.00|56.87|385.87|401.30|442.74|458.17|191.29|
+2451437|67067|2451443|421|19818|827546|6474|9002|43306|10433|5739|44267|38|10|15|4|160|126|74|91.08|163.94|113.11|3761.42|8370.14|6739.92|12131.56|83.70|0.00|6065.78|8370.14|8453.84|14435.92|14519.62|1630.22|
+2451437|67067|2451490|4894|19818|827546|6474|9002|43306|10433|5739|44267|2|20|20|2|181|126|37|15.70|38.77|18.60|746.29|688.20|580.90|1434.49|0.00|178.93|214.97|509.27|509.27|724.24|724.24|-71.63|
+2451437|67067|2451553|2756|19818|827546|6474|9002|43306|10433|5739|44267|34|13|13|2|48|126|21|88.46|252.99|252.99|0.00|5312.79|1857.66|5312.79|159.38|0.00|1753.08|5312.79|5472.17|7065.87|7225.25|3455.13|
+2452573|49655|2452662|10329|36536|1243818|1147|30429|92749|712421|322|44404|45|24|11|2|166|127|24|55.33|66.39|1.32|1561.68|31.68|1327.92|1593.36|0.63|0.00|191.04|31.68|32.31|222.72|223.35|-1296.24|
+2452573|49655|2452610|5991|36536|1243818|1147|30429|92749|712421|322|44404|9|3|16|3|150|127|53|98.83|158.12|121.75|1927.61|6452.75|5237.99|8380.36|387.16|0.00|837.93|6452.75|6839.91|7290.68|7677.84|1214.76|
+2452573|49655|2452684|5154|36536|1243818|1147|30429|92749|712421|322|44404|15|18|10|1|72|127|75|69.96|115.43|63.48|3896.25|4761.00|5247.00|8657.25|95.22|0.00|3895.50|4761.00|4856.22|8656.50|8751.72|-486.00|
+2452573|49655|2452587|12901|36536|1243818|1147|30429|92749|712421|322|44404|27|27|14|4|104|127|10|81.28|117.85|44.78|730.70|447.80|812.80|1178.50|9.85|250.76|0.00|197.04|206.89|197.04|206.89|-615.76|
+2452573|49655|2452600|6282|36536|1243818|1147|30429|92749|712421|322|44404|33|25|19|3|114|127|11|6.54|16.02|12.01|44.11|132.11|71.94|176.22|0.00|0.00|79.20|132.11|132.11|211.31|211.31|60.17|
+2452573|49655|2452629|8893|36536|1243818|1147|30429|92749|712421|322|44404|45|25|3|3|253|127|26|14.02|21.87|14.87|182.00|386.62|364.52|568.62|30.92|0.00|136.24|386.62|417.54|522.86|553.78|22.10|
+2452573|49655|2452642|11097|36536|1243818|1147|30429|92749|712421|322|44404|36|15|5|2|52|127|47|59.18|127.82|97.14|1441.96|4565.58|2781.46|6007.54|319.59|0.00|1741.82|4565.58|4885.17|6307.40|6626.99|1784.12|
+2452573|49655|2452648|4959|36536|1243818|1147|30429|92749|712421|322|44404|18|3|8|4|110|127|98|43.32|56.31|51.80|441.98|5076.40|4245.36|5518.38|0.00|3959.59|2482.34|1116.81|1116.81|3599.15|3599.15|-3128.55|
+2452573|49655|2452591|13813|36536|1243818|1147|30429|92749|712421|322|44404|27|9|13|1|149|127|70|35.56|106.68|37.33|4854.50|2613.10|2489.20|7467.60|78.39|0.00|373.10|2613.10|2691.49|2986.20|3064.59|123.90|
+2452573|49655|2452665|1704|36536|1243818|1147|30429|92749|712421|322|44404|54|21|2|3|127|127|67|69.86|145.30|21.79|8275.17|1459.93|4680.62|9735.10|26.57|131.39|2433.44|1328.54|1355.11|3761.98|3788.55|-3352.08|
+2452573|49655|2452662|12037|36536|1243818|1147|30429|92749|712421|322|44404|6|3|11|5|276|127|78|41.49|121.56|13.37|8438.82|1042.86|3236.22|9481.68|73.00|0.00|1611.48|1042.86|1115.86|2654.34|2727.34|-2193.36|
+2452573|49655|2452579|7369|36536|1243818|1147|30429|92749|712421|322|44404|13|3|7|4|144|127|36|10.76|22.70|17.47|188.28|628.92|387.36|817.20|6.28|0.00|106.20|628.92|635.20|735.12|741.40|241.56|
+2452573|49655|2452594|7380|36536|1243818|1147|30429|92749|712421|322|44404|24|30|5|3|61|127|42|44.75|96.21|66.38|1252.86|2787.96|1879.50|4040.82|163.09|975.78|1373.82|1812.18|1975.27|3186.00|3349.09|-67.32|
+2452573|49655|2452629|6537|36536|1243818|1147|30429|92749|712421|322|44404|27|15|15|4|66|127|54|79.87|201.27|114.72|4673.70|6194.88|4312.98|10868.58|247.79|0.00|977.94|6194.88|6442.67|7172.82|7420.61|1881.90|
+2452573|49655|2452658|15132|36536|1243818|1147|30429|92749|712421|322|44404|33|12|2|1|17|127|40|62.50|91.87|61.55|1212.80|2462.00|2500.00|3674.80|147.72|0.00|1837.20|2462.00|2609.72|4299.20|4446.92|-38.00|
+2451507|27019|2451616|1888|73673|1357529|4585|22488|41031|541222|103|26359|16|14|17|1|155|128|40|53.55|59.97|4.19|2231.20|167.60|2142.00|2398.80|5.02|0.00|1103.20|167.60|172.62|1270.80|1275.82|-1974.40|
+2451507|27019|2451594|15220|73673|1357529|4585|22488|41031|541222|103|26359|8|22|19|3|70|128|86|96.09|197.94|156.37|3575.02|13447.82|8263.74|17022.84|451.84|2151.65|8170.86|11296.17|11748.01|19467.03|19918.87|3032.43|
+2451507|27019|2451559|5954|73673|1357529|4585|22488|41031|541222|103|26359|26|13|13|1|39|128|83|49.69|59.62|47.09|1039.99|3908.47|4124.27|4948.46|273.59|0.00|890.59|3908.47|4182.06|4799.06|5072.65|-215.80|
+2451507|27019|2451593|17528|73673|1357529|4585|22488|41031|541222|103|26359|25|14|13|3|203|128|46|25.06|58.38|54.29|188.14|2497.34|1152.76|2685.48|99.89|0.00|885.96|2497.34|2597.23|3383.30|3483.19|1344.58|
+2451507|27019|2451603|9262|73673|1357529|4585|22488|41031|541222|103|26359|28|28|8|3|38|128|18|75.95|100.25|49.12|920.34|884.16|1367.10|1804.50|79.57|0.00|72.18|884.16|963.73|956.34|1035.91|-482.94|
+2451507|27019|2451578|9505|73673|1357529|4585|22488|41031|541222|103|26359|8|28|5|5|45|128|100|14.84|31.46|22.33|913.00|2233.00|1484.00|3146.00|133.98|0.00|377.00|2233.00|2366.98|2610.00|2743.98|749.00|
+2451507|27019|2451527|16570|73673|1357529|4585|22488|41031|541222|103|26359|22|7|13|5|114|128|66|49.50|126.72|17.74|7192.68|1170.84|3267.00|8363.52|2.10|1135.71|333.96|35.13|37.23|369.09|371.19|-3231.87|
+2451507|27019|2451596|1838|73673|1357529|4585|22488|41031|541222|103|26359|19|25|9|5|271|128|40|30.16|84.74|40.67|1762.80|1626.80|1206.40|3389.60|0.00|764.59|1626.80|862.21|862.21|2489.01|2489.01|-344.19|
+2451507|27019|2451609|4108|73673|1357529|4585|22488|41031|541222|103|26359|56|10|20|2|274|128|84|91.65|178.71|109.01|5854.80|9156.84|7698.60|15011.64|274.70|0.00|149.52|9156.84|9431.54|9306.36|9581.06|1458.24|
+2451507|27019|2451516|16760|73673|1357529|4585|22488|41031|541222|103|26359|22|13|2|2|105|128|47|82.47|164.11|24.61|6556.50|1156.67|3876.09|7713.17|57.83|0.00|693.72|1156.67|1214.50|1850.39|1908.22|-2719.42|
+2451507|27019|2451545|9781|73673|1357529|4585|22488|41031|541222|103|26359|13|4|9|5|51|128|30|61.20|137.08|102.81|1028.10|3084.30|1836.00|4112.40|12.33|2467.44|1562.70|616.86|629.19|2179.56|2191.89|-1219.14|
+2451507|27019|2451586|6772|73673|1357529|4585|22488|41031|541222|103|26359|22|10|20|4|168|128|82|37.50|75.00|31.50|3567.00|2583.00|3075.00|6150.00|63.54|464.94|2275.50|2118.06|2181.60|4393.56|4457.10|-956.94|
+2451507|27019|2451558|13226|73673|1357529|4585|22488|41031|541222|103|26359|38|7|18|2|9|128|70|74.93|76.42|55.02|1498.00|3851.40|5245.10|5349.40|231.08|0.00|2139.20|3851.40|4082.48|5990.60|6221.68|-1393.70|
+2451507|27019|2451574|13741|73673|1357529|4585|22488|41031|541222|103|26359|7|22|17|5|163|128|96|8.16|14.03|3.92|970.56|376.32|783.36|1346.88|30.10|0.00|552.00|376.32|406.42|928.32|958.42|-407.04|
+2451169|47837|2451262|4147|49745|1301217|6642|16514|61699|812066|2036|33108|32|14|6|2|288|129|81|63.77|114.14|77.61|2958.93|6286.41|5165.37|9245.34|0.00|0.00|0.00|6286.41|6286.41|6286.41|6286.41|1121.04|
+2451169|47837|2451222|8116|49745|1301217|6642|16514|61699|812066|2036|33108|50|22|18|4|203|129|64|49.73|115.87|13.90|6526.08|889.60|3182.72|7415.68|71.16|0.00|519.04|889.60|960.76|1408.64|1479.80|-2293.12|
+2451169|47837|2451193|358|49745|1301217|6642|16514|61699|812066|2036|33108|31|2|5|1|124|129|15|29.90|42.15|20.23|328.80|303.45|448.50|632.25|18.20|0.00|214.95|303.45|321.65|518.40|536.60|-145.05|
+2451169|47837|2451289|8228|49745|1301217|6642|16514|61699|812066|2036|33108|55|7|20|3|209|129|97|2.96|3.46|0.38|298.76|36.86|287.12|335.62|0.00|0.00|136.77|36.86|36.86|173.63|173.63|-250.26|
+2451169|47837|2451207|9274|49745|1301217|6642|16514|61699|812066|2036|33108|55|28|2|4|156|129|53|96.79|186.80|29.88|8316.76|1583.64|5129.87|9900.40|26.60|1140.22|1880.97|443.42|470.02|2324.39|2350.99|-4686.45|
+2451169|47837|2451221|4906|49745|1301217|6642|16514|61699|812066|2036|33108|7|7|1|1|168|129|44|91.91|123.15|44.33|3468.08|1950.52|4044.04|5418.60|156.04|0.00|162.36|1950.52|2106.56|2112.88|2268.92|-2093.52|
+2451169|47837|2451173|9457|49745|1301217|6642|16514|61699|812066|2036|33108|46|20|8|5|212|129|55|42.50|116.45|18.63|5380.10|1024.65|2337.50|6404.75|51.23|0.00|3202.10|1024.65|1075.88|4226.75|4277.98|-1312.85|
+2451169|47837|2451192|3541|49745|1301217|6642|16514|61699|812066|2036|33108|16|16|9|4|279|129|88|48.84|79.12|26.90|4595.36|2367.20|4297.92|6962.56|86.64|923.20|2296.80|1444.00|1530.64|3740.80|3827.44|-2853.92|
+2451169|47837|2451235|11912|49745|1301217|6642|16514|61699|812066|2036|33108|28|16|6|5|44|129|82|92.36|167.17|6.68|13160.18|547.76|7573.52|13707.94|16.43|0.00|684.70|547.76|564.19|1232.46|1248.89|-7025.76|
+2451169|47837|2451285|16153|49745|1301217|6642|16514|61699|812066|2036|33108|2|25|14|5|196|129|22|29.98|58.46|16.95|913.22|372.90|659.56|1286.12|0.00|0.00|527.12|372.90|372.90|900.02|900.02|-286.66|
+2451169|47837|2451170|4801|49745|1301217|6642|16514|61699|812066|2036|33108|55|16|20|1|192|129|11|43.31|112.60|70.93|458.37|780.23|476.41|1238.60|46.81|0.00|458.26|780.23|827.04|1238.49|1285.30|303.82|
+2451169|47837|2451222|3608|49745|1301217|6642|16514|61699|812066|2036|33108|32|4|19|5|51|129|67|62.83|132.57|108.70|1599.29|7282.90|4209.61|8882.19|145.65|0.00|2575.48|7282.90|7428.55|9858.38|10004.03|3073.29|
+2451169|47837|2451275|1087|49745|1301217|6642|16514|61699|812066|2036|33108|16|22|15|3|171|129|69|52.91|93.65|0.93|6397.68|64.17|3650.79|6461.85|1.28|0.00|580.98|64.17|65.45|645.15|646.43|-3586.62|
+2451484|39865|2451569|16844|73522|91558|6041|17677|39744|890554|3879|32315|52|4|2|3|67|130|20|66.60|123.21|87.47|714.80|1749.40|1332.00|2464.20|17.49|0.00|689.80|1749.40|1766.89|2439.20|2456.69|417.40|
+2451484|39865|2451503|16582|73522|91558|6041|17677|39744|890554|3879|32315|43|26|16|3|230|130|50|94.16|271.18|219.65|2576.50|10982.50|4708.00|13559.00|988.42|0.00|2440.50|10982.50|11970.92|13423.00|14411.42|6274.50|
+2451484|39865|2451509|2371|73522|91558|6041|17677|39744|890554|3879|32315|55|20|7|5|133|130|54|78.63|142.32|83.96|3151.44|4533.84|4246.02|7685.28|317.36|0.00|460.62|4533.84|4851.20|4994.46|5311.82|287.82|
+2451484|39865|2451591|826|73522|91558|6041|17677|39744|890554|3879|32315|56|16|19|2|152|130|43|67.48|153.17|145.51|329.38|6256.93|2901.64|6586.31|563.12|0.00|2107.43|6256.93|6820.05|8364.36|8927.48|3355.29|
+2451484|39865|2451512|6379|73522|91558|6041|17677|39744|890554|3879|32315|43|2|2|2|212|130|91|43.90|107.99|24.83|7567.56|2259.53|3994.90|9827.09|79.08|1129.76|4323.41|1129.77|1208.85|5453.18|5532.26|-2865.13|
+2451484|39865|2451502|10423|73522|91558|6041|17677|39744|890554|3879|32315|50|28|1|5|116|130|32|60.75|159.16|93.90|2088.32|3004.80|1944.00|5093.12|270.43|0.00|967.68|3004.80|3275.23|3972.48|4242.91|1060.80|
+2451484|39865|2451566|17011|73522|91558|6041|17677|39744|890554|3879|32315|2|10|10|4|228|130|91|80.38|152.72|83.99|6254.43|7643.09|7314.58|13897.52|105.47|4127.26|6114.29|3515.83|3621.30|9630.12|9735.59|-3798.75|
+2451484|39865|2451520|529|73522|91558|6041|17677|39744|890554|3879|32315|28|8|12|4|289|130|37|74.52|133.39|9.33|4590.22|345.21|2757.24|4935.43|3.45|0.00|394.79|345.21|348.66|740.00|743.45|-2412.03|
+2451484|39865|2451532|1129|73522|91558|6041|17677|39744|890554|3879|32315|50|16|19|2|122|130|30|89.59|268.77|134.38|4031.70|4031.40|2687.70|8063.10|0.00|0.00|886.80|4031.40|4031.40|4918.20|4918.20|1343.70|
+2451484|39865|2451516|7400|73522|91558|6041|17677|39744|890554|3879|32315|32|22|14|4|244|130|26|59.37|133.58|101.52|833.56|2639.52|1543.62|3473.08|52.79|0.00|1215.50|2639.52|2692.31|3855.02|3907.81|1095.90|
+2451484|39865|2451509|16843|73522|91558|6041|17677|39744|890554|3879|32315|16|7|2|5|298|130|93|35.29|56.81|36.35|1902.78|3380.55|3281.97|5283.33|173.08|1216.99|1689.81|2163.56|2336.64|3853.37|4026.45|-1118.41|
+2451484|39865|2451517|16298|73522|91558|6041|17677|39744|890554|3879|32315|56|28|4|5|26|130|94|11.64|13.03|3.64|882.66|342.16|1094.16|1224.82|0.00|0.00|73.32|342.16|342.16|415.48|415.48|-752.00|
+2451484|39865|2451504|14311|73522|91558|6041|17677|39744|890554|3879|32315|8|28|2|2|262|130|98|45.38|132.50|17.22|11297.44|1687.56|4447.24|12985.00|54.67|320.63|1298.50|1366.93|1421.60|2665.43|2720.10|-3080.31|
+2452171|78440|2452233|1567|9906|1805575|5597|7666|7721|1123492|2226|15895|41|19|9|1|240|131|41|3.34|6.04|1.32|193.52|54.12|136.94|247.64|3.24|0.00|46.74|54.12|57.36|100.86|104.10|-82.82|
+2452171|78440|2452220|13073|9906|1805575|5597|7666|7721|1123492|2226|15895|45|25|3|3|193|131|46|43.56|88.42|35.36|2440.76|1626.56|2003.76|4067.32|113.85|0.00|1341.82|1626.56|1740.41|2968.38|3082.23|-377.20|
+2452171|78440|2452275|3893|9906|1805575|5597|7666|7721|1123492|2226|15895|1|15|15|2|269|131|1|89.18|113.25|80.40|32.85|80.40|89.18|113.25|3.37|32.16|53.22|48.24|51.61|101.46|104.83|-40.94|
+2452171|78440|2452218|11439|9906|1805575|5597|7666|7721|1123492|2226|15895|45|7|5|1|97|131|81|95.17|103.73|60.16|3529.17|4872.96|7708.77|8402.13|194.91|0.00|3780.27|4872.96|5067.87|8653.23|8848.14|-2835.81|
+2452171|78440|2452191|12991|9906|1805575|5597|7666|7721|1123492|2226|15895|57|7|16|3|123|131|71|55.61|81.19|23.54|4093.15|1671.34|3948.31|5764.49|83.56|0.00|1268.06|1671.34|1754.90|2939.40|3022.96|-2276.97|
+2452171|78440|2452191|13909|9906|1805575|5597|7666|7721|1123492|2226|15895|29|23|9|4|266|131|73|32.87|43.38|24.72|1362.18|1804.56|2399.51|3166.74|144.36|0.00|31.39|1804.56|1948.92|1835.95|1980.31|-594.95|
+2452171|78440|2452228|3857|9906|1805575|5597|7666|7721|1123492|2226|15895|59|13|14|1|81|131|37|88.15|185.11|42.57|5273.98|1575.09|3261.55|6849.07|141.75|0.00|547.60|1575.09|1716.84|2122.69|2264.44|-1686.46|
+2452171|78440|2452192|7119|9906|1805575|5597|7666|7721|1123492|2226|15895|9|3|19|1|150|131|90|43.18|94.56|30.25|5787.90|2722.50|3886.20|8510.40|190.57|0.00|594.90|2722.50|2913.07|3317.40|3507.97|-1163.70|
+2452171|78440|2452262|1201|9906|1805575|5597|7666|7721|1123492|2226|15895|5|21|16|1|289|131|74|60.90|155.29|69.88|6320.34|5171.12|4506.60|11491.46|51.71|0.00|0.00|5171.12|5222.83|5171.12|5222.83|664.52|
+2452171|78440|2452227|13061|9906|1805575|5597|7666|7721|1123492|2226|15895|41|17|10|3|109|131|8|27.90|33.20|11.62|172.64|92.96|223.20|265.60|0.19|86.45|0.00|6.51|6.70|6.51|6.70|-216.69|
+2452171|78440|2452283|531|9906|1805575|5597|7666|7721|1123492|2226|15895|13|29|6|4|265|131|89|30.37|80.78|30.69|4458.01|2731.41|2702.93|7189.42|109.25|0.00|3594.71|2731.41|2840.66|6326.12|6435.37|28.48|
+2451160|78790|2451174|8878|85582|1641083|5737|22038|30867|917433|94|15195|52|8|16|4|52|132|5|16.11|16.43|10.35|30.40|51.75|80.55|82.15|2.07|0.00|27.10|51.75|53.82|78.85|80.92|-28.80|
+2451160|78790|2451203|427|85582|1641083|5737|22038|30867|917433|94|15195|32|25|9|5|278|132|54|80.19|225.33|67.59|8517.96|3649.86|4330.26|12167.82|4.37|3211.87|3041.82|437.99|442.36|3479.81|3484.18|-3892.27|
+2451160|78790|2451173|571|85582|1641083|5737|22038|30867|917433|94|15195|16|7|19|4|102|132|54|90.56|166.63|78.31|4769.28|4228.74|4890.24|8998.02|338.29|0.00|1709.10|4228.74|4567.03|5937.84|6276.13|-661.50|
+2451160|78790|2451188|8840|85582|1641083|5737|22038|30867|917433|94|15195|55|25|8|3|208|132|28|88.69|253.65|116.67|3835.44|3266.76|2483.32|7102.20|91.46|1437.37|1988.56|1829.39|1920.85|3817.95|3909.41|-653.93|
+2451160|78790|2451257|10748|85582|1641083|5737|22038|30867|917433|94|15195|46|25|12|1|70|132|81|35.54|75.34|39.17|2929.77|3172.77|2878.74|6102.54|12.69|2918.94|2867.40|253.83|266.52|3121.23|3133.92|-2624.91|
+2451160|78790|2451278|9238|85582|1641083|5737|22038|30867|917433|94|15195|56|22|15|5|28|132|72|78.57|78.57|54.99|1697.76|3959.28|5657.04|5657.04|0.00|0.00|1414.08|3959.28|3959.28|5373.36|5373.36|-1697.76|
+2451160|78790|2451213|16954|85582|1641083|5737|22038|30867|917433|94|15195|14|25|11|1|201|132|90|68.77|176.05|137.31|3486.60|12357.90|6189.30|15844.50|129.75|8032.63|2376.00|4325.27|4455.02|6701.27|6831.02|-1864.03|
+2451160|78790|2451185|3742|85582|1641083|5737|22038|30867|917433|94|15195|58|10|19|5|211|132|10|70.44|140.17|116.34|238.30|1163.40|704.40|1401.70|2.44|919.08|182.20|244.32|246.76|426.52|428.96|-460.08|
+2451160|78790|2451167|14414|85582|1641083|5737|22038|30867|917433|94|15195|2|20|13|2|174|132|27|68.65|133.18|83.90|1330.56|2265.30|1853.55|3595.86|135.91|0.00|1294.38|2265.30|2401.21|3559.68|3695.59|411.75|
+2451160|78790|2451210|4502|85582|1641083|5737|22038|30867|917433|94|15195|52|22|4|4|214|132|56|48.95|93.98|47.92|2579.36|2683.52|2741.20|5262.88|241.51|0.00|1368.08|2683.52|2925.03|4051.60|4293.11|-57.68|
+2451160|78790|2451227|6430|85582|1641083|5737|22038|30867|917433|94|15195|32|10|15|4|273|132|2|75.74|185.56|144.73|81.66|289.46|151.48|371.12|0.00|289.46|96.48|0.00|0.00|96.48|96.48|-151.48|
+2451160|78790|2451226|4480|85582|1641083|5737|22038|30867|917433|94|15195|40|2|16|2|111|132|87|67.05|178.35|67.77|9620.46|5895.99|5833.35|15516.45|117.91|0.00|7447.20|5895.99|6013.90|13343.19|13461.10|62.64|
+2451160|78790|2451229|5980|85582|1641083|5737|22038|30867|917433|94|15195|13|19|13|3|236|132|20|72.51|135.59|75.93|1193.20|1518.60|1450.20|2711.80|78.96|531.51|1247.40|987.09|1066.05|2234.49|2313.45|-463.11|
+2451160|78790|2451208|15370|85582|1641083|5737|22038|30867|917433|94|15195|1|14|2|1|50|132|62|53.48|102.68|88.30|891.56|5474.60|3315.76|6366.16|383.22|0.00|2355.38|5474.60|5857.82|7829.98|8213.20|2158.84|
+2451162|50155|2451204|7408|26465|1196287|1309|33178|1049|231132|2291|30913|26|19|3|1|282|133|79|6.53|7.37|2.50|384.73|197.50|515.87|582.23|1.97|0.00|86.90|197.50|199.47|284.40|286.37|-318.37|
+2451162|50155|2451198|15478|26465|1196287|1309|33178|1049|231132|2291|30913|28|10|7|3|152|133|47|45.04|75.66|12.10|2987.32|568.70|2116.88|3556.02|43.22|28.43|355.32|540.27|583.49|895.59|938.81|-1576.61|
+2451162|50155|2451198|11923|26465|1196287|1309|33178|1049|231132|2291|30913|56|14|17|1|65|133|53|73.35|161.37|90.36|3763.53|4789.08|3887.55|8552.61|143.67|0.00|2479.87|4789.08|4932.75|7268.95|7412.62|901.53|
+2451162|50155|2451163|16726|26465|1196287|1309|33178|1049|231132|2291|30913|49|2|19|1|180|133|25|60.40|83.35|13.33|1750.50|333.25|1510.00|2083.75|2.99|283.26|208.25|49.99|52.98|258.24|261.23|-1460.01|
+2451162|50155|2451277|16465|26465|1196287|1309|33178|1049|231132|2291|30913|2|16|9|4|189|133|30|55.94|99.57|60.73|1165.20|1821.90|1678.20|2987.10|54.65|0.00|417.90|1821.90|1876.55|2239.80|2294.45|143.70|
+2451162|50155|2451237|9865|26465|1196287|1309|33178|1049|231132|2291|30913|25|26|18|2|153|133|91|26.94|27.20|14.14|1188.46|1286.74|2451.54|2475.20|51.46|0.00|618.80|1286.74|1338.20|1905.54|1957.00|-1164.80|
+2451162|50155|2451266|524|26465|1196287|1309|33178|1049|231132|2291|30913|56|25|10|4|251|133|6|77.74|167.14|66.85|601.74|401.10|466.44|1002.84|12.03|0.00|10.02|401.10|413.13|411.12|423.15|-65.34|
+2451162|50155|2451186|11246|26465|1196287|1309|33178|1049|231132|2291|30913|8|14|4|5|153|133|61|48.39|70.16|13.33|3466.63|813.13|2951.79|4279.76|32.52|0.00|85.40|813.13|845.65|898.53|931.05|-2138.66|
+2451162|50155|2451198|13405|26465|1196287|1309|33178|1049|231132|2291|30913|55|28|5|2|299|133|3|56.60|116.03|84.70|93.99|254.10|169.80|348.09|20.32|0.00|48.72|254.10|274.42|302.82|323.14|84.30|
+2451162|50155|2451239|17929|26465|1196287|1309|33178|1049|231132|2291|30913|50|2|8|2|133|133|43|13.62|21.65|6.92|633.39|297.56|585.66|930.95|23.80|0.00|64.93|297.56|321.36|362.49|386.29|-288.10|
+2452496|33301|2452594|16983|24050|1584970|1090|19610|46339|268737|5217|47337|18|3|18|4|57|134|9|53.69|141.74|109.13|293.49|982.17|483.21|1275.66|88.39|0.00|318.87|982.17|1070.56|1301.04|1389.43|498.96|
+2452496|33301|2452578|3210|24050|1584970|1090|19610|46339|268737|5217|47337|1|25|18|5|72|134|37|10.21|26.95|2.15|917.60|79.55|377.77|997.15|3.97|0.00|448.44|79.55|83.52|527.99|531.96|-298.22|
+2452496|33301|2452608|14835|24050|1584970|1090|19610|46339|268737|5217|47337|18|1|3|5|217|134|28|21.65|38.53|25.42|367.08|711.76|606.20|1078.84|49.82|0.00|237.16|711.76|761.58|948.92|998.74|105.56|
+2452496|33301|2452542|3969|24050|1584970|1090|19610|46339|268737|5217|47337|30|27|6|5|64|134|37|91.49|118.02|82.61|1310.17|3056.57|3385.13|4366.74|244.52|0.00|960.52|3056.57|3301.09|4017.09|4261.61|-328.56|
+2452496|33301|2452541|10519|24050|1584970|1090|19610|46339|268737|5217|47337|15|9|2|4|56|134|15|50.40|55.44|43.24|183.00|648.60|756.00|831.60|38.91|0.00|33.15|648.60|687.51|681.75|720.66|-107.40|
+2452496|33301|2452538|8190|24050|1584970|1090|19610|46339|268737|5217|47337|6|25|6|4|269|134|71|83.10|177.00|162.84|1005.36|11561.64|5900.10|12567.00|809.31|0.00|4775.46|11561.64|12370.95|16337.10|17146.41|5661.54|
+2452496|33301|2452514|3597|24050|1584970|1090|19610|46339|268737|5217|47337|15|1|14|2|250|134|36|11.07|28.44|6.82|778.32|245.52|398.52|1023.84|6.87|147.31|51.12|98.21|105.08|149.33|156.20|-300.31|
+2452496|33301|2452533|5773|24050|1584970|1090|19610|46339|268737|5217|47337|37|25|14|3|249|134|23|49.88|112.72|51.85|1400.01|1192.55|1147.24|2592.56|83.47|0.00|103.50|1192.55|1276.02|1296.05|1379.52|45.31|
+2452496|33301|2452593|8049|24050|1584970|1090|19610|46339|268737|5217|47337|19|3|14|4|91|134|80|2.65|5.22|1.30|313.60|104.00|212.00|417.60|5.20|0.00|74.40|104.00|109.20|178.40|183.60|-108.00|
+2452496|33301|2452514|14611|24050|1584970|1090|19610|46339|268737|5217|47337|43|6|4|5|66|134|93|75.23|139.92|76.95|5856.21|7156.35|6996.39|13012.56|143.12|0.00|1431.27|7156.35|7299.47|8587.62|8730.74|159.96|
+2452496|33301|2452587|12363|24050|1584970|1090|19610|46339|268737|5217|47337|57|9|18|4|157|134|29|71.94|146.75|105.66|1191.61|3064.14|2086.26|4255.75|30.64|0.00|468.06|3064.14|3094.78|3532.20|3562.84|977.88|
+2452496|33301|2452502|14859|24050|1584970|1090|19610|46339|268737|5217|47337|27|13|13|1|272|134|91|36.50|51.10|0.51|4603.69|46.41|3321.50|4650.10|0.00|0.00|650.65|46.41|46.41|697.06|697.06|-3275.09|
+2452496|33301|2452585|5515|24050|1584970|1090|19610|46339|268737|5217|47337|21|21|18|3|265|134|64|36.43|95.81|60.36|2268.80|3863.04|2331.52|6131.84|0.00|0.00|2697.60|3863.04|3863.04|6560.64|6560.64|1531.52|
+2451510|43393|2451586|3632|26805|1783570|7088|21006|26805|1783570|7088|21006|37|14|17|1|185|135|40|90.27|178.73|71.49|4289.60|2859.60|3610.80|7149.20|142.98|0.00|2645.20|2859.60|3002.58|5504.80|5647.78|-751.20|
+2451510|43393|2451623|6550|26805|1783570|7088|21006|26805|1783570|7088|21006|7|4|9|3|127|135|33|96.60|171.94|116.91|1815.99|3858.03|3187.80|5674.02|270.06|0.00|1248.06|3858.03|4128.09|5106.09|5376.15|670.23|
+2451510|43393|2451561|7966|26805|1783570|7088|21006|26805|1783570|7088|21006|28|7|13|2|271|135|68|27.78|38.05|21.30|1139.00|1448.40|1889.04|2587.40|30.41|1013.88|25.84|434.52|464.93|460.36|490.77|-1454.52|
+2451510|43393|2451528|5560|26805|1783570|7088|21006|26805|1783570|7088|21006|32|19|13|4|192|135|31|6.79|15.27|10.99|132.68|340.69|210.49|473.37|13.62|0.00|75.64|340.69|354.31|416.33|429.95|130.20|
+2451510|43393|2451518|2264|26805|1783570|7088|21006|26805|1783570|7088|21006|2|4|14|5|276|135|78|49.78|117.48|78.71|3024.06|6139.38|3882.84|9163.44|122.78|0.00|2932.02|6139.38|6262.16|9071.40|9194.18|2256.54|
+2451510|43393|2451545|8827|26805|1783570|7088|21006|26805|1783570|7088|21006|26|26|7|2|67|135|23|39.92|54.69|4.92|1144.71|113.16|918.16|1257.87|2.26|0.00|50.14|113.16|115.42|163.30|165.56|-805.00|
+2451510|43393|2451579|17998|26805|1783570|7088|21006|26805|1783570|7088|21006|38|7|18|2|233|135|63|59.36|117.53|16.45|6368.04|1036.35|3739.68|7404.39|51.81|0.00|3331.44|1036.35|1088.16|4367.79|4419.60|-2703.33|
+2451510|43393|2451560|4166|26805|1783570|7088|21006|26805|1783570|7088|21006|43|10|7|3|62|135|80|76.64|129.52|94.54|2798.40|7563.20|6131.20|10361.60|15.12|6806.88|3729.60|756.32|771.44|4485.92|4501.04|-5374.88|
+2451510|43393|2451571|2008|26805|1783570|7088|21006|26805|1783570|7088|21006|37|28|8|4|160|135|100|14.12|37.41|5.61|3180.00|561.00|1412.00|3741.00|11.22|0.00|1496.00|561.00|572.22|2057.00|2068.22|-851.00|
+2451510|43393|2451578|15340|26805|1783570|7088|21006|26805|1783570|7088|21006|14|26|20|1|242|135|13|31.35|56.74|17.58|509.08|228.54|407.55|737.62|18.28|0.00|7.28|228.54|246.82|235.82|254.10|-179.01|
+2452127|43101|2452230|3231|77225|1147154|2683|41412|94282|1272679|5246|43975|35|5|5|5|212|136|83|15.22|26.48|6.62|1648.38|549.46|1263.26|2197.84|49.45|0.00|0.00|549.46|598.91|549.46|598.91|-713.80|
+2452127|43101|2452222|1461|77225|1147154|2683|41412|94282|1272679|5246|43975|51|29|7|4|238|136|11|15.99|20.78|16.41|48.07|180.51|175.89|228.58|9.02|0.00|50.27|180.51|189.53|230.78|239.80|4.62|
+2452127|43101|2452158|14849|77225|1147154|2683|41412|94282|1272679|5246|43975|21|29|19|3|177|136|88|73.33|183.32|86.16|8550.08|7582.08|6453.04|16132.16|303.28|0.00|322.08|7582.08|7885.36|7904.16|8207.44|1129.04|
+2452127|43101|2452179|14413|77225|1147154|2683|41412|94282|1272679|5246|43975|47|17|17|2|10|136|19|72.84|108.53|107.44|20.71|2041.36|1383.96|2062.07|81.65|0.00|350.55|2041.36|2123.01|2391.91|2473.56|657.40|
+2452127|43101|2452144|523|77225|1147154|2683|41412|94282|1272679|5246|43975|35|25|5|5|185|136|5|98.60|107.47|77.37|150.50|386.85|493.00|537.35|0.07|379.11|26.85|7.74|7.81|34.59|34.66|-485.26|
+2452127|43101|2452225|13939|77225|1147154|2683|41412|94282|1272679|5246|43975|47|11|8|5|35|136|28|46.78|134.25|119.48|413.56|3345.44|1309.84|3759.00|66.90|0.00|1315.44|3345.44|3412.34|4660.88|4727.78|2035.60|
+2452127|43101|2452185|13023|77225|1147154|2683|41412|94282|1272679|5246|43975|23|7|19|4|289|136|14|11.79|23.93|6.70|241.22|93.80|165.06|335.02|6.56|0.00|140.70|93.80|100.36|234.50|241.06|-71.26|
+2452127|43101|2452218|6931|77225|1147154|2683|41412|94282|1272679|5246|43975|1|25|15|2|19|136|54|68.91|148.84|87.81|3295.62|4741.74|3721.14|8037.36|0.00|0.00|2973.78|4741.74|4741.74|7715.52|7715.52|1020.60|
+2452127|43101|2452200|13627|77225|1147154|2683|41412|94282|1272679|5246|43975|31|3|18|4|170|136|68|38.14|51.48|2.57|3325.88|174.76|2593.52|3500.64|6.99|0.00|244.80|174.76|181.75|419.56|426.55|-2418.76|
+2452127|43101|2452148|16787|77225|1147154|2683|41412|94282|1272679|5246|43975|45|21|2|5|28|136|55|74.66|194.86|68.20|6966.30|3751.00|4106.30|10717.30|37.51|0.00|1714.35|3751.00|3788.51|5465.35|5502.86|-355.30|
+2452127|43101|2452183|6999|77225|1147154|2683|41412|94282|1272679|5246|43975|13|25|12|3|107|136|11|74.69|103.81|9.34|1039.17|102.74|821.59|1141.91|1.02|0.00|502.37|102.74|103.76|605.11|606.13|-718.85|
+2452127|43101|2452202|14861|77225|1147154|2683|41412|94282|1272679|5246|43975|1|7|10|3|276|136|87|67.20|75.93|74.41|132.24|6473.67|5846.40|6605.91|0.00|0.00|990.06|6473.67|6473.67|7463.73|7463.73|627.27|
+2452127|43101|2452145|6301|77225|1147154|2683|41412|94282|1272679|5246|43975|3|15|11|2|153|136|29|51.43|127.54|77.79|1442.75|2255.91|1491.47|3698.66|90.23|0.00|554.77|2255.91|2346.14|2810.68|2900.91|764.44|
+2451446|79601|2451549|7072|72575|1638838|2701|11764|16255|1274557|6153|14016|31|8|4|3|2|137|83|81.38|145.67|53.89|7617.74|4472.87|6754.54|12090.61|14.76|3980.85|604.24|492.02|506.78|1096.26|1111.02|-6262.52|
+2451446|79601|2451537|9826|72575|1638838|2701|11764|16255|1274557|6153|14016|52|7|14|1|168|137|38|16.93|46.38|6.02|1533.68|228.76|643.34|1762.44|4.39|155.55|193.80|73.21|77.60|267.01|271.40|-570.13|
+2451446|79601|2451492|11614|72575|1638838|2701|11764|16255|1274557|6153|14016|43|4|4|5|239|137|57|64.87|95.35|89.62|326.61|5108.34|3697.59|5434.95|153.25|0.00|1847.37|5108.34|5261.59|6955.71|7108.96|1410.75|
+2451446|79601|2451488|11680|72575|1638838|2701|11764|16255|1274557|6153|14016|13|25|12|3|167|137|17|77.08|134.11|127.40|114.07|2165.80|1310.36|2279.87|57.17|736.37|341.87|1429.43|1486.60|1771.30|1828.47|119.07|
+2451446|79601|2451498|6304|72575|1638838|2701|11764|16255|1274557|6153|14016|20|4|15|5|272|137|83|24.53|58.62|53.93|389.27|4476.19|2035.99|4865.46|107.42|1790.47|826.68|2685.72|2793.14|3512.40|3619.82|649.73|
+2451446|79601|2451528|13057|72575|1638838|2701|11764|16255|1274557|6153|14016|37|10|13|5|57|137|61|57.90|149.38|73.19|4647.59|4464.59|3531.90|9112.18|29.46|1517.96|546.56|2946.63|2976.09|3493.19|3522.65|-585.27|
+2451446|79601|2451540|14845|72575|1638838|2701|11764|16255|1274557|6153|14016|1|19|12|4|226|137|94|11.50|17.13|7.53|902.40|707.82|1081.00|1610.22|29.30|382.22|466.24|325.60|354.90|791.84|821.14|-755.40|
+2451446|79601|2451484|16472|72575|1638838|2701|11764|16255|1274557|6153|14016|44|25|16|1|149|137|50|36.31|84.23|32.84|2569.50|1642.00|1815.50|4211.50|147.78|0.00|1768.50|1642.00|1789.78|3410.50|3558.28|-173.50|
+2451446|79601|2451546|1426|72575|1638838|2701|11764|16255|1274557|6153|14016|32|22|10|3|96|137|99|48.02|73.47|28.65|4437.18|2836.35|4753.98|7273.53|65.80|1191.26|2399.76|1645.09|1710.89|4044.85|4110.65|-3108.89|
+2451446|79601|2451474|5671|72575|1638838|2701|11764|16255|1274557|6153|14016|19|2|17|4|61|137|75|45.67|87.22|27.91|4448.25|2093.25|3425.25|6541.50|146.52|0.00|1373.25|2093.25|2239.77|3466.50|3613.02|-1332.00|
+2451446|79601|2451520|11464|72575|1638838|2701|11764|16255|1274557|6153|14016|22|1|13|5|44|137|27|10.10|11.71|0.81|294.30|21.87|272.70|316.17|0.00|0.00|88.29|21.87|21.87|110.16|110.16|-250.83|
+2451446|79601|2451537|2293|72575|1638838|2701|11764|16255|1274557|6153|14016|28|22|9|1|93|137|32|96.08|282.47|115.81|5333.12|3705.92|3074.56|9039.04|333.53|0.00|0.00|3705.92|4039.45|3705.92|4039.45|631.36|
+2451446|79601|2451537|15590|72575|1638838|2701|11764|16255|1274557|6153|14016|20|22|3|4|225|137|92|14.43|29.58|27.80|163.76|2557.60|1327.56|2721.36|0.00|0.00|652.28|2557.60|2557.60|3209.88|3209.88|1230.04|
+2451446|79601|2451540|10411|72575|1638838|2701|11764|16255|1274557|6153|14016|49|19|15|2|152|137|92|70.81|124.62|93.46|2866.72|8598.32|6514.52|11465.04|601.88|0.00|2750.80|8598.32|9200.20|11349.12|11951.00|2083.80|
+2451446|79601|2451524|5698|72575|1638838|2701|11764|16255|1274557|6153|14016|10|8|1|5|226|137|74|71.95|133.10|111.80|1576.20|8273.20|5324.30|9849.40|248.19|0.00|2954.82|8273.20|8521.39|11228.02|11476.21|2948.90|
+2451446|79601|2451483|3778|72575|1638838|2701|11764|16255|1274557|6153|14016|7|22|7|5|283|137|12|26.23|68.46|27.38|492.96|328.56|314.76|821.52|3.28|0.00|16.32|328.56|331.84|344.88|348.16|13.80|
+2451407|76320|2451438|997|93916|83515|6361|10760|99282|634616|51|5650|13|26|18|2|26|138|22|84.72|168.59|94.41|1631.96|2077.02|1863.84|3708.98|186.93|0.00|890.12|2077.02|2263.95|2967.14|3154.07|213.18|
+2451407|76320|2451435|13090|93916|83515|6361|10760|99282|634616|51|5650|22|22|9|4|169|138|26|20.84|51.89|35.80|418.34|930.80|541.84|1349.14|16.47|381.62|26.78|549.18|565.65|575.96|592.43|7.34|
+2451407|76320|2451469|1600|93916|83515|6361|10760|99282|634616|51|5650|31|19|4|3|299|138|93|82.22|216.23|99.46|10859.61|9249.78|7646.46|20109.39|462.48|0.00|7239.12|9249.78|9712.26|16488.90|16951.38|1603.32|
+2451407|76320|2451457|9380|93916|83515|6361|10760|99282|634616|51|5650|1|22|3|4|255|138|46|89.26|142.81|14.28|5912.38|656.88|4105.96|6569.26|21.28|420.40|0.00|236.48|257.76|236.48|257.76|-3869.48|
+2451407|76320|2451513|766|93916|83515|6361|10760|99282|634616|51|5650|38|7|15|4|143|138|54|77.16|228.39|162.15|3576.96|8756.10|4166.64|12333.06|612.92|0.00|3946.32|8756.10|9369.02|12702.42|13315.34|4589.46|
+2451407|76320|2451468|2536|93916|83515|6361|10760|99282|634616|51|5650|34|7|20|1|142|138|19|10.87|21.19|5.29|302.10|100.51|206.53|402.61|2.95|2.01|193.23|98.50|101.45|291.73|294.68|-108.03|
+2451407|76320|2451474|14446|93916|83515|6361|10760|99282|634616|51|5650|34|28|15|5|172|138|38|30.88|80.90|79.28|61.56|3012.64|1173.44|3074.20|150.63|0.00|1475.54|3012.64|3163.27|4488.18|4638.81|1839.20|
+2451407|76320|2451451|7862|93916|83515|6361|10760|99282|634616|51|5650|16|16|2|2|265|138|55|87.45|190.64|40.03|8283.55|2201.65|4809.75|10485.20|198.14|0.00|419.10|2201.65|2399.79|2620.75|2818.89|-2608.10|
+2451407|76320|2451500|6889|93916|83515|6361|10760|99282|634616|51|5650|8|26|6|5|267|138|96|13.85|27.70|19.66|771.84|1887.36|1329.60|2659.20|150.98|0.00|398.40|1887.36|2038.34|2285.76|2436.74|557.76|
+2451407|76320|2451430|7396|93916|83515|6361|10760|99282|634616|51|5650|43|26|3|5|154|138|47|28.10|41.86|10.46|1475.80|491.62|1320.70|1967.42|9.83|0.00|235.94|491.62|501.45|727.56|737.39|-829.08|
+2451407|76320|2451484|3692|93916|83515|6361|10760|99282|634616|51|5650|31|28|4|3|65|138|57|63.97|142.01|105.08|2105.01|5989.56|3646.29|8094.57|117.39|4312.48|2509.14|1677.08|1794.47|4186.22|4303.61|-1969.21|
+2451407|76320|2451483|7729|93916|83515|6361|10760|99282|634616|51|5650|49|7|3|5|144|138|15|43.87|49.57|46.10|52.05|691.50|658.05|743.55|15.14|186.70|282.45|504.80|519.94|787.25|802.39|-153.25|
+2451407|76320|2451429|10126|93916|83515|6361|10760|99282|634616|51|5650|28|10|7|3|249|138|74|30.05|82.33|22.22|4448.14|1644.28|2223.70|6092.42|16.44|1315.42|1278.72|328.86|345.30|1607.58|1624.02|-1894.84|
+2451173|47364|2451265|17320|40917|915281|1292|32056|40917|915281|1292|32056|52|8|10|3|73|139|86|4.33|11.47|7.22|365.50|620.92|372.38|986.42|55.88|0.00|29.24|620.92|676.80|650.16|706.04|248.54|
+2451173|47364|2451246|8758|40917|915281|1292|32056|40917|915281|1292|32056|10|8|11|2|83|139|31|62.00|161.20|64.48|2998.32|1998.88|1922.00|4997.20|43.17|919.48|749.58|1079.40|1122.57|1828.98|1872.15|-842.60|
+2451173|47364|2451270|1760|40917|915281|1292|32056|40917|915281|1292|32056|34|20|14|4|297|139|92|48.93|123.30|16.02|9869.76|1473.84|4501.56|11343.60|14.73|0.00|5671.80|1473.84|1488.57|7145.64|7160.37|-3027.72|
+2451173|47364|2451215|10550|40917|915281|1292|32056|40917|915281|1292|32056|14|2|13|4|95|139|5|7.04|11.89|4.87|35.10|24.35|35.20|59.45|0.00|10.47|5.35|13.88|13.88|19.23|19.23|-21.32|
+2451173|47364|2451263|6721|40917|915281|1292|32056|40917|915281|1292|32056|44|8|7|4|41|139|53|32.23|93.14|24.21|3653.29|1283.13|1708.19|4936.42|89.81|0.00|2171.94|1283.13|1372.94|3455.07|3544.88|-425.06|
+2451173|47364|2451243|9139|40917|915281|1292|32056|40917|915281|1292|32056|16|2|5|3|34|139|64|33.08|47.96|24.93|1473.92|1595.52|2117.12|3069.44|143.59|0.00|429.44|1595.52|1739.11|2024.96|2168.55|-521.60|
+2451173|47364|2451192|7216|40917|915281|1292|32056|40917|915281|1292|32056|46|13|13|1|272|139|79|83.61|156.35|50.03|8399.28|3952.37|6605.19|12351.65|118.57|0.00|3087.32|3952.37|4070.94|7039.69|7158.26|-2652.82|
+2451173|47364|2451268|13478|40917|915281|1292|32056|40917|915281|1292|32056|37|7|9|1|236|139|55|74.49|172.81|31.10|7794.05|1710.50|4096.95|9504.55|34.21|0.00|1995.95|1710.50|1744.71|3706.45|3740.66|-2386.45|
+2451173|47364|2451210|14204|40917|915281|1292|32056|40917|915281|1292|32056|28|26|16|3|237|139|46|32.49|56.53|25.43|1430.60|1169.78|1494.54|2600.38|40.00|725.26|1144.02|444.52|484.52|1588.54|1628.54|-1050.02|
+2451173|47364|2451201|3890|40917|915281|1292|32056|40917|915281|1292|32056|2|14|17|1|35|139|21|69.46|166.00|107.90|1220.10|2265.90|1458.66|3486.00|203.93|0.00|732.06|2265.90|2469.83|2997.96|3201.89|807.24|
+2451173|47364|2451256|14821|40917|915281|1292|32056|40917|915281|1292|32056|19|8|17|2|27|139|42|71.14|92.48|41.61|2136.54|1747.62|2987.88|3884.16|0.00|0.00|660.24|1747.62|1747.62|2407.86|2407.86|-1240.26|
+2451173|47364|2451287|2845|40917|915281|1292|32056|40917|915281|1292|32056|44|13|14|3|252|139|89|78.86|178.22|39.20|12372.78|3488.80|7018.54|15861.58|104.66|0.00|6661.65|3488.80|3593.46|10150.45|10255.11|-3529.74|
+2451173|47364|2451270|9643|40917|915281|1292|32056|40917|915281|1292|32056|34|26|10|1|279|139|42|79.46|205.80|125.53|3371.34|5272.26|3337.32|8643.60|210.89|0.00|345.66|5272.26|5483.15|5617.92|5828.81|1934.94|
+2451276|70169|2451310|12652|63405|1704417|5076|37288|40863|1306557|6818|8230|38|14|16|4|122|140|87|11.37|32.51|16.25|1414.62|1413.75|989.19|2828.37|56.55|0.00|678.60|1413.75|1470.30|2092.35|2148.90|424.56|
+2451276|70169|2451320|10705|63405|1704417|5076|37288|40863|1306557|6818|8230|7|4|12|1|30|140|78|35.29|82.22|15.62|5194.80|1218.36|2752.62|6413.16|4.14|804.11|705.12|414.25|418.39|1119.37|1123.51|-2338.37|
+2451276|70169|2451319|6464|63405|1704417|5076|37288|40863|1306557|6818|8230|31|14|13|5|37|140|38|38.53|108.26|51.96|2139.40|1974.48|1464.14|4113.88|98.72|0.00|1727.48|1974.48|2073.20|3701.96|3800.68|510.34|
+2451276|70169|2451379|2666|63405|1704417|5076|37288|40863|1306557|6818|8230|49|19|12|2|249|140|58|66.57|153.11|102.58|2930.74|5949.64|3861.06|8880.38|178.48|0.00|1331.68|5949.64|6128.12|7281.32|7459.80|2088.58|
+2451276|70169|2451342|9808|63405|1704417|5076|37288|40863|1306557|6818|8230|4|2|1|5|149|140|15|48.04|113.37|91.82|323.25|1377.30|720.60|1700.55|68.86|0.00|510.15|1377.30|1446.16|1887.45|1956.31|656.70|
+2451276|70169|2451358|541|63405|1704417|5076|37288|40863|1306557|6818|8230|2|2|19|1|233|140|77|39.10|91.88|13.78|6013.70|1061.06|3010.70|7074.76|74.27|0.00|1272.81|1061.06|1135.33|2333.87|2408.14|-1949.64|
+2451276|70169|2451282|1598|63405|1704417|5076|37288|40863|1306557|6818|8230|37|16|19|4|159|140|84|35.57|49.79|19.91|2509.92|1672.44|2987.88|4182.36|133.79|0.00|1463.28|1672.44|1806.23|3135.72|3269.51|-1315.44|
+2451276|70169|2451366|7411|63405|1704417|5076|37288|40863|1306557|6818|8230|44|26|15|1|77|140|96|2.23|4.79|4.69|9.60|450.24|214.08|459.84|31.51|0.00|8.64|450.24|481.75|458.88|490.39|236.16|
+2451276|70169|2451333|8707|63405|1704417|5076|37288|40863|1306557|6818|8230|55|26|10|3|189|140|74|3.13|7.35|3.67|272.32|271.58|231.62|543.90|10.86|0.00|195.36|271.58|282.44|466.94|477.80|39.96|
+2451522|36372|2451625|1262|44953|692145|2184|14623|7461|1798094|1540|18732|20|14|2|4|126|141|87|74.94|110.16|76.01|2971.05|6612.87|6519.78|9583.92|66.12|0.00|1149.27|6612.87|6678.99|7762.14|7828.26|93.09|
+2451522|36372|2451524|4552|44953|692145|2184|14623|7461|1798094|1540|18732|4|26|18|2|153|141|24|28.83|76.97|59.26|425.04|1422.24|691.92|1847.28|113.77|0.00|683.28|1422.24|1536.01|2105.52|2219.29|730.32|
+2451522|36372|2451625|7510|44953|692145|2184|14623|7461|1798094|1540|18732|26|19|19|3|74|141|11|15.19|40.25|23.34|186.01|256.74|167.09|442.75|15.40|0.00|17.71|256.74|272.14|274.45|289.85|89.65|
+2451522|36372|2451570|2588|44953|692145|2184|14623|7461|1798094|1540|18732|1|4|18|5|62|141|74|5.52|6.62|4.56|152.44|337.44|408.48|489.88|23.62|0.00|34.04|337.44|361.06|371.48|395.10|-71.04|
+2451522|36372|2451562|17714|44953|692145|2184|14623|7461|1798094|1540|18732|38|22|4|1|138|141|75|27.37|59.94|25.77|2562.75|1932.75|2052.75|4495.50|154.62|0.00|1213.50|1932.75|2087.37|3146.25|3300.87|-120.00|
+2451522|36372|2451588|14450|44953|692145|2184|14623|7461|1798094|1540|18732|37|4|7|1|255|141|3|16.66|28.65|19.19|28.38|57.57|49.98|85.95|2.87|0.00|12.87|57.57|60.44|70.44|73.31|7.59|
+2451522|36372|2451593|15673|44953|692145|2184|14623|7461|1798094|1540|18732|37|20|3|2|24|141|37|19.78|45.88|6.42|1460.02|237.54|731.86|1697.56|19.00|0.00|50.69|237.54|256.54|288.23|307.23|-494.32|
+2451522|36372|2451541|12982|44953|692145|2184|14623|7461|1798094|1540|18732|28|13|2|4|36|141|45|1.66|3.05|1.40|74.25|63.00|74.70|137.25|5.04|0.00|10.80|63.00|68.04|73.80|78.84|-11.70|
+2451522|36372|2451593|11134|44953|692145|2184|14623|7461|1798094|1540|18732|56|26|2|5|15|141|70|44.92|125.77|114.45|792.40|8011.50|3144.40|8803.90|560.80|0.00|3345.30|8011.50|8572.30|11356.80|11917.60|4867.10|
+2451522|36372|2451638|13561|44953|692145|2184|14623|7461|1798094|1540|18732|43|20|10|4|68|141|25|90.59|228.28|86.74|3538.50|2168.50|2264.75|5707.00|130.11|0.00|456.50|2168.50|2298.61|2625.00|2755.11|-96.25|
+2451522|36372|2451628|8611|44953|692145|2184|14623|7461|1798094|1540|18732|43|4|17|3|299|141|38|38.48|114.67|41.28|2788.82|1568.64|1462.24|4357.46|141.17|0.00|1699.36|1568.64|1709.81|3268.00|3409.17|106.40|
+2451522|36372|2451599|10580|44953|692145|2184|14623|7461|1798094|1540|18732|37|13|5|1|234|141|14|6.52|16.88|10.63|87.50|148.82|91.28|236.32|4.46|0.00|63.70|148.82|153.28|212.52|216.98|57.54|
+2451522|36372|2451626|4174|44953|692145|2184|14623|7461|1798094|1540|18732|46|25|12|1|130|141|12|46.93|109.81|105.41|52.80|1264.92|563.16|1317.72|41.11|442.72|632.40|822.20|863.31|1454.60|1495.71|259.04|
+2451463|59944|2451553|12019|75797|1297229|4708|26540|95939|1630780|6883|16315|10|2|8|4|163|142|68|59.12|112.32|44.92|4583.20|3054.56|4020.16|7637.76|183.27|0.00|3054.56|3054.56|3237.83|6109.12|6292.39|-965.60|
+2451463|59944|2451553|14630|75797|1297229|4708|26540|95939|1630780|6883|16315|14|28|9|1|283|142|78|13.98|32.15|13.82|1429.74|1077.96|1090.44|2507.70|43.11|0.00|475.80|1077.96|1121.07|1553.76|1596.87|-12.48|
+2451463|59944|2451474|2473|75797|1297229|4708|26540|95939|1630780|6883|16315|14|13|6|5|154|142|39|43.85|71.91|38.11|1318.20|1486.29|1710.15|2804.49|118.90|0.00|532.74|1486.29|1605.19|2019.03|2137.93|-223.86|
+2451463|59944|2451507|11299|75797|1297229|4708|26540|95939|1630780|6883|16315|46|13|20|3|196|142|52|2.87|3.09|2.93|8.32|152.36|149.24|160.68|13.71|0.00|10.92|152.36|166.07|163.28|176.99|3.12|
+2451463|59944|2451519|9601|75797|1297229|4708|26540|95939|1630780|6883|16315|46|25|15|4|296|142|78|42.17|125.66|106.81|1470.30|8331.18|3289.26|9801.48|83.31|0.00|195.78|8331.18|8414.49|8526.96|8610.27|5041.92|
+2451463|59944|2451551|11623|75797|1297229|4708|26540|95939|1630780|6883|16315|56|4|4|3|269|142|6|22.31|32.34|15.19|102.90|91.14|133.86|194.04|7.29|0.00|56.22|91.14|98.43|147.36|154.65|-42.72|
+2451463|59944|2451485|10672|75797|1297229|4708|26540|95939|1630780|6883|16315|52|16|8|5|29|142|5|9.97|10.56|4.75|29.05|23.75|49.85|52.80|1.18|0.00|10.00|23.75|24.93|33.75|34.93|-26.10|
+2451463|59944|2451493|1108|75797|1297229|4708|26540|95939|1630780|6883|16315|55|1|1|4|144|142|70|55.02|79.22|16.63|4381.30|1164.10|3851.40|5545.40|81.48|0.00|387.80|1164.10|1245.58|1551.90|1633.38|-2687.30|
+2451463|59944|2451558|4195|75797|1297229|4708|26540|95939|1630780|6883|16315|28|1|7|3|56|142|71|62.04|67.00|8.71|4138.59|618.41|4404.84|4757.00|49.47|0.00|428.13|618.41|667.88|1046.54|1096.01|-3786.43|
+2450865|48676|2450952|17311|48483|754001|5119|47884|18240|1747351|5238|19203|56|26|12|2|292|143|64|4.19|10.81|7.13|235.52|456.32|268.16|691.84|36.50|0.00|145.28|456.32|492.82|601.60|638.10|188.16|
+2450865|48676|2450952|17068|48483|754001|5119|47884|18240|1747351|5238|19203|28|26|10|4|258|143|93|81.48|204.51|14.31|17688.60|1330.83|7577.64|19019.43|39.92|0.00|3993.42|1330.83|1370.75|5324.25|5364.17|-6246.81|
+2450865|48676|2450967|12394|48483|754001|5119|47884|18240|1747351|5238|19203|56|16|9|4|129|143|62|71.91|210.69|202.26|522.66|12540.12|4458.42|13062.78|250.80|0.00|5485.76|12540.12|12790.92|18025.88|18276.68|8081.70|
+2450865|48676|2450980|16090|48483|754001|5119|47884|18240|1747351|5238|19203|56|1|13|2|284|143|32|83.92|227.42|13.64|6840.96|436.48|2685.44|7277.44|39.28|0.00|3201.92|436.48|475.76|3638.40|3677.68|-2248.96|
+2450865|48676|2450896|10312|48483|754001|5119|47884|18240|1747351|5238|19203|25|1|16|4|159|143|33|33.26|64.52|27.09|1235.19|893.97|1097.58|2129.16|35.75|0.00|127.71|893.97|929.72|1021.68|1057.43|-203.61|
+2450865|48676|2450899|6703|48483|754001|5119|47884|18240|1747351|5238|19203|56|14|20|4|201|143|19|91.91|163.59|75.25|1678.46|1429.75|1746.29|3108.21|42.89|0.00|497.23|1429.75|1472.64|1926.98|1969.87|-316.54|
+2450865|48676|2450908|13696|48483|754001|5119|47884|18240|1747351|5238|19203|40|7|14|2|20|143|81|34.99|62.63|43.84|1521.99|3551.04|2834.19|5073.03|71.02|0.00|101.25|3551.04|3622.06|3652.29|3723.31|716.85|
+2450865|48676|2450896|11968|48483|754001|5119|47884|18240|1747351|5238|19203|38|16|18|2|269|143|39|36.47|97.01|35.89|2383.68|1399.71|1422.33|3783.39|0.00|0.00|1702.35|1399.71|1399.71|3102.06|3102.06|-22.62|
+2450865|48676|2450947|11644|48483|754001|5119|47884|18240|1747351|5238|19203|26|25|15|4|296|143|82|19.02|34.99|17.84|1406.30|1462.88|1559.64|2869.18|87.77|0.00|400.98|1462.88|1550.65|1863.86|1951.63|-96.76|
+2450865|48676|2450929|14563|48483|754001|5119|47884|18240|1747351|5238|19203|2|22|15|3|238|143|79|11.97|27.53|9.91|1391.98|782.89|945.63|2174.87|28.18|78.28|43.45|704.61|732.79|748.06|776.24|-241.02|
+2450865|48676|2450965|4063|48483|754001|5119|47884|18240|1747351|5238|19203|55|19|15|5|272|143|64|77.83|87.94|43.09|2870.40|2757.76|4981.12|5628.16|55.15|0.00|900.48|2757.76|2812.91|3658.24|3713.39|-2223.36|
+2450865|48676|2450941|11176|48483|754001|5119|47884|18240|1747351|5238|19203|37|14|17|1|88|143|93|44.27|86.76|78.08|807.24|7261.44|4117.11|8068.68|290.45|0.00|1451.73|7261.44|7551.89|8713.17|9003.62|3144.33|
+2450865|48676|2450893|415|48483|754001|5119|47884|18240|1747351|5238|19203|37|13|5|5|98|143|30|90.50|174.66|113.52|1834.20|3405.60|2715.00|5239.80|4.76|3167.20|2148.30|238.40|243.16|2386.70|2391.46|-2476.60|
+2452386|42051|2452464|8028|78725|1440340|355|7970|2997|582809|2015|13630|49|13|9|1|193|144|77|8.94|26.46|1.32|1935.78|101.64|688.38|2037.42|3.04|0.00|40.04|101.64|104.68|141.68|144.72|-586.74|
+2452386|42051|2452427|15504|78725|1440340|355|7970|2997|582809|2015|13630|37|3|12|5|25|144|23|91.34|203.68|85.54|2717.22|1967.42|2100.82|4684.64|157.39|0.00|936.79|1967.42|2124.81|2904.21|3061.60|-133.40|
+2452386|42051|2452487|216|78725|1440340|355|7970|2997|582809|2015|13630|49|7|3|2|151|144|21|4.86|8.35|0.41|166.74|8.61|102.06|175.35|0.51|0.00|13.86|8.61|9.12|22.47|22.98|-93.45| +2452386|42051|2452387|11215|78725|1440340|355|7970|2997|582809|2015|13630|31|3|15|1|274|144|31|12.67|33.32|22.65|330.77|702.15|392.77|1032.92|21.06|0.00|51.46|702.15|723.21|753.61|774.67|309.38| +2452386|42051|2452395|6241|78725|1440340|355|7970|2997|582809|2015|13630|33|12|5|1|160|144|31|11.77|15.18|9.71|169.57|301.01|364.87|470.58|6.02|0.00|42.16|301.01|307.03|343.17|349.19|-63.86| +2452386|42051|2452437|15019|78725|1440340|355|7970|2997|582809|2015|13630|36|27|10|1|32|144|50|95.12|155.99|49.91|5304.00|2495.50|4756.00|7799.50|49.91|0.00|3743.50|2495.50|2545.41|6239.00|6288.91|-2260.50| +2452386|42051|2452471|12949|78725|1440340|355|7970|2997|582809|2015|13630|18|18|19|4|219|144|75|9.65|15.44|5.40|753.00|405.00|723.75|1158.00|24.30|0.00|57.75|405.00|429.30|462.75|487.05|-318.75| +2452386|42051|2452405|13543|78725|1440340|355|7970|2997|582809|2015|13630|19|3|7|2|144|144|62|51.50|87.55|18.38|4288.54|1139.56|3193.00|5428.10|68.37|0.00|759.50|1139.56|1207.93|1899.06|1967.43|-2053.44| +2452386|42051|2452443|14262|78725|1440340|355|7970|2997|582809|2015|13630|18|19|14|5|156|144|41|6.88|11.35|11.35|0.00|465.35|282.08|465.35|27.92|0.00|32.39|465.35|493.27|497.74|525.66|183.27| +2452386|42051|2452434|2893|78725|1440340|355|7970|2997|582809|2015|13630|3|21|10|4|98|144|73|84.75|207.63|99.66|7881.81|7275.18|6186.75|15156.99|582.01|0.00|3637.59|7275.18|7857.19|10912.77|11494.78|1088.43| +2452386|42051|2452449|12606|78725|1440340|355|7970|2997|582809|2015|13630|13|18|1|5|142|144|30|54.48|71.91|22.29|1488.60|668.70|1634.40|2157.30|40.12|0.00|819.60|668.70|708.82|1488.30|1528.42|-965.70| +2452386|42051|2452485|8877|78725|1440340|355|7970|2997|582809|2015|13630|49|6|18|3|168|144|73|89.92|196.02|3.92|14023.30|286.16|6564.16|14309.46|2.74|240.37|4006.24|45.79|48.53|4052.03|4054.77|-6518.37| +2452386|42051|2452447|13167|78725|1440340|355|7970|2997|582809|2015|13630|36|19|12|4|136|144|64|4.02|6.75|2.83|250.88|181.12|257.28|432.00|7.24|0.00|42.88|181.12|188.36|224.00|231.24|-76.16| +2451068|14277|2451102|3697|52028|1029431|3634|34237|20211|1754034|5693|27496|31|20|19|4|229|145|76|59.50|95.79|83.33|946.96|6333.08|4522.00|7280.04|63.33|0.00|1237.28|6333.08|6396.41|7570.36|7633.69|1811.08| +2451068|14277|2451120|12298|52028|1029431|3634|34237|20211|1754034|5693|27496|50|14|13|3|203|145|67|9.11|9.56|5.16|294.80|345.72|610.37|640.52|3.45|0.00|198.32|345.72|349.17|544.04|547.49|-264.65| +2451068|14277|2451074|17390|52028|1029431|3634|34237|20211|1754034|5693|27496|43|22|9|1|153|145|100|38.46|111.53|14.49|9704.00|1449.00|3846.00|11153.00|28.98|0.00|2342.00|1449.00|1477.98|3791.00|3819.98|-2397.00| +2451068|14277|2451185|3716|52028|1029431|3634|34237|20211|1754034|5693|27496|10|10|9|4|299|145|50|93.97|205.79|109.06|4836.50|5453.00|4698.50|10289.50|104.69|4144.28|411.50|1308.72|1413.41|1720.22|1824.91|-3389.78| +2451068|14277|2451117|1720|52028|1029431|3634|34237|20211|1754034|5693|27496|16|26|2|2|82|145|26|3.60|3.85|0.96|75.14|24.96|93.60|100.10|2.24|0.00|40.04|24.96|27.20|65.00|67.24|-68.64| +2451068|14277|2451185|14012|52028|1029431|3634|34237|20211|1754034|5693|27496|46|14|12|3|57|145|55|19.84|51.98|31.18|1144.00|1714.90|1091.20|2858.90|85.74|0.00|1343.65|1714.90|1800.64|3058.55|3144.29|623.70| 
+2451068|14277|2451088|7774|52028|1029431|3634|34237|20211|1754034|5693|27496|40|26|20|1|9|145|51|77.38|214.34|137.17|3935.67|6995.67|3946.38|10931.34|629.61|0.00|4591.02|6995.67|7625.28|11586.69|12216.30|3049.29| +2451068|14277|2451110|17458|52028|1029431|3634|34237|20211|1754034|5693|27496|34|14|8|3|248|145|21|80.69|238.84|40.60|4163.04|852.60|1694.49|5015.64|34.10|0.00|852.60|852.60|886.70|1705.20|1739.30|-841.89| +2451068|14277|2451168|13184|52028|1029431|3634|34237|20211|1754034|5693|27496|58|8|20|5|200|145|16|89.39|151.96|110.93|656.48|1774.88|1430.24|2431.36|124.24|0.00|170.08|1774.88|1899.12|1944.96|2069.20|344.64| +2451785|61675|2451879|11312|46466|649270|874|48878|59133|444907|5690|2094|17|19|4|5|209|146|83|29.39|76.12|22.83|4423.07|1894.89|2439.37|6317.96|113.69|0.00|2337.28|1894.89|2008.58|4232.17|4345.86|-544.48| +2451785|61675|2451902|1121|46466|649270|874|48878|59133|444907|5690|2094|11|29|17|1|215|146|95|20.77|43.40|42.09|124.45|3998.55|1973.15|4123.00|39.98|0.00|164.35|3998.55|4038.53|4162.90|4202.88|2025.40| +2451785|61675|2451893|9053|46466|649270|874|48878|59133|444907|5690|2094|29|2|19|1|203|146|58|24.01|51.14|2.55|2818.22|147.90|1392.58|2966.12|10.35|0.00|948.88|147.90|158.25|1096.78|1107.13|-1244.68| +2451785|61675|2451878|15176|46466|649270|874|48878|59133|444907|5690|2094|50|14|10|2|134|146|49|56.13|107.20|24.65|4044.95|1207.85|2750.37|5252.80|60.39|0.00|1470.49|1207.85|1268.24|2678.34|2738.73|-1542.52| +2451785|61675|2451900|13957|46466|649270|874|48878|59133|444907|5690|2094|23|13|2|2|292|146|85|39.05|40.22|7.23|2804.15|614.55|3319.25|3418.70|36.87|0.00|717.40|614.55|651.42|1331.95|1368.82|-2704.70| +2451785|61675|2451802|1115|46466|649270|874|48878|59133|444907|5690|2094|38|29|10|3|203|146|88|24.18|35.54|28.07|657.36|2470.16|2127.84|3127.52|28.65|1753.81|1219.68|716.35|745.00|1936.03|1964.68|-1411.49| +2451785|61675|2451803|17840|46466|649270|874|48878|59133|444907|5690|2094|47|14|12|4|133|146|29|30.18|37.12|26.72|301.60|774.88|875.22|1076.48|69.73|0.00|42.92|774.88|844.61|817.80|887.53|-100.34| +2451785|61675|2451867|5261|46466|649270|874|48878|59133|444907|5690|2094|11|11|16|3|193|146|13|36.18|101.66|55.91|594.75|726.83|470.34|1321.58|43.60|0.00|647.53|726.83|770.43|1374.36|1417.96|256.49| +2451785|61675|2451861|14900|46466|649270|874|48878|59133|444907|5690|2094|25|11|19|2|135|146|62|19.79|47.49|11.87|2208.44|735.94|1226.98|2944.38|44.15|0.00|823.98|735.94|780.09|1559.92|1604.07|-491.04| +2451785|61675|2451875|185|46466|649270|874|48878|59133|444907|5690|2094|47|2|3|4|65|146|67|28.65|79.64|46.19|2241.15|3094.73|1919.55|5335.88|123.78|0.00|853.58|3094.73|3218.51|3948.31|4072.09|1175.18| +2451785|61675|2451839|445|46466|649270|874|48878|59133|444907|5690|2094|41|14|5|2|59|146|58|81.38|116.37|60.51|3239.88|3509.58|4720.04|6749.46|140.38|0.00|607.26|3509.58|3649.96|4116.84|4257.22|-1210.46| +2451785|61675|2451865|17881|46466|649270|874|48878|59133|444907|5690|2094|35|17|20|3|109|146|47|28.66|77.66|66.01|547.55|3102.47|1347.02|3650.02|217.17|0.00|1751.69|3102.47|3319.64|4854.16|5071.33|1755.45| +2451785|61675|2451830|11513|46466|649270|874|48878|59133|444907|5690|2094|35|13|19|3|237|146|43|8.54|15.37|7.53|337.12|323.79|367.22|660.91|16.18|0.00|105.35|323.79|339.97|429.14|445.32|-43.43| +2451824|24644|2451847|8965|72367|462290|1813|11336|82689|1214313|3905|46628|23|7|6|1|153|147|64|1.62|2.49|1.64|54.40|104.96|103.68|159.36|5.24|0.00|31.36|104.96|110.20|136.32|141.56|1.28| 
+2451824|24644|2451940|6566|72367|462290|1813|11336|82689|1214313|3905|46628|47|7|7|5|20|147|92|41.64|117.00|52.65|5920.20|4843.80|3830.88|10764.00|48.43|0.00|2045.16|4843.80|4892.23|6888.96|6937.39|1012.92| +2451824|24644|2451853|14269|72367|462290|1813|11336|82689|1214313|3905|46628|11|11|16|3|283|147|97|68.73|155.32|90.08|6328.28|8737.76|6666.81|15066.04|87.37|0.00|6628.98|8737.76|8825.13|15366.74|15454.11|2070.95| +2451824|24644|2451904|236|72367|462290|1813|11336|82689|1214313|3905|46628|23|2|5|5|205|147|14|19.30|47.86|30.63|241.22|428.82|270.20|670.04|1.24|304.46|314.86|124.36|125.60|439.22|440.46|-145.84| +2451824|24644|2451832|9421|72367|462290|1813|11336|82689|1214313|3905|46628|1|13|13|4|295|147|38|38.83|85.81|80.66|195.70|3065.08|1475.54|3260.78|153.25|0.00|97.66|3065.08|3218.33|3162.74|3315.99|1589.54| +2451824|24644|2451827|15385|72367|462290|1813|11336|82689|1214313|3905|46628|32|26|1|3|72|147|58|55.64|105.15|5.25|5794.20|304.50|3227.12|6098.70|27.40|0.00|2439.48|304.50|331.90|2743.98|2771.38|-2922.62| +2451824|24644|2451885|9596|72367|462290|1813|11336|82689|1214313|3905|46628|37|17|16|1|16|147|61|75.98|171.71|84.13|5342.38|5131.93|4634.78|10474.31|256.59|0.00|4294.40|5131.93|5388.52|9426.33|9682.92|497.15| +2451824|24644|2451923|371|72367|462290|1813|11336|82689|1214313|3905|46628|7|1|1|3|188|147|72|13.65|23.75|14.48|667.44|1042.56|982.80|1710.00|58.38|208.51|700.56|834.05|892.43|1534.61|1592.99|-148.75| +2451824|24644|2451868|6662|72367|462290|1813|11336|82689|1214313|3905|46628|55|8|5|5|72|147|30|30.92|61.22|28.16|991.80|844.80|927.60|1836.60|25.34|0.00|128.40|844.80|870.14|973.20|998.54|-82.80| +2451824|24644|2451884|6434|72367|462290|1813|11336|82689|1214313|3905|46628|44|14|13|3|173|147|73|52.47|123.30|96.17|1980.49|7020.41|3830.31|9000.90|351.02|0.00|3690.15|7020.41|7371.43|10710.56|11061.58|3190.10| +2451824|24644|2451862|5291|72367|462290|1813|11336|82689|1214313|3905|46628|59|7|15|1|60|147|25|75.49|98.89|8.90|2249.75|222.50|1887.25|2472.25|12.77|40.05|346.00|182.45|195.22|528.45|541.22|-1704.80| +2451824|24644|2451853|5635|72367|462290|1813|11336|82689|1214313|3905|46628|43|13|2|2|292|147|19|75.50|221.97|128.74|1771.37|2446.06|1434.50|4217.43|220.14|0.00|1813.36|2446.06|2666.20|4259.42|4479.56|1011.56| +2451824|24644|2451848|3431|72367|462290|1813|11336|82689|1214313|3905|46628|23|29|11|4|250|147|89|24.60|58.05|6.96|4547.01|619.44|2189.40|5166.45|43.36|0.00|1704.35|619.44|662.80|2323.79|2367.15|-1569.96| +2452278|80559|2452283|16039|72570|1224887|5651|25215|25264|1016460|2307|40271|54|27|14|1|208|148|26|19.02|43.17|13.38|774.54|347.88|494.52|1122.42|31.30|0.00|100.88|347.88|379.18|448.76|480.06|-146.64| +2452278|80559|2452394|8241|72570|1224887|5651|25215|25264|1016460|2307|40271|9|1|4|2|153|148|52|66.94|101.07|39.41|3206.32|2049.32|3480.88|5255.64|102.46|0.00|472.68|2049.32|2151.78|2522.00|2624.46|-1431.56| +2452278|80559|2452298|9523|72570|1224887|5651|25215|25264|1016460|2307|40271|9|12|7|5|106|148|81|52.70|125.42|104.09|1727.73|8431.29|4268.70|10159.02|505.87|0.00|2031.48|8431.29|8937.16|10462.77|10968.64|4162.59| +2452278|80559|2452281|9307|72570|1224887|5651|25215|25264|1016460|2307|40271|51|21|16|1|56|148|6|73.22|199.15|119.49|477.96|716.94|439.32|1194.90|50.18|0.00|226.98|716.94|767.12|943.92|994.10|277.62| +2452278|80559|2452313|7068|72570|1224887|5651|25215|25264|1016460|2307|40271|18|9|8|4|60|148|42|77.60|176.92|102.61|3121.02|4309.62|3259.20|7430.64|0.00|0.00|2080.26|4309.62|4309.62|6389.88|6389.88|1050.42| 
+2452278|80559|2452330|13345|72570|1224887|5651|25215|25264|1016460|2307|40271|55|18|3|4|5|148|55|87.65|255.93|174.03|4504.50|9571.65|4820.75|14076.15|66.04|7370.17|5630.35|2201.48|2267.52|7831.83|7897.87|-2619.27| +2452278|80559|2452300|9567|72570|1224887|5651|25215|25264|1016460|2307|40271|18|7|16|1|56|148|76|87.68|207.80|116.36|6949.44|8843.36|6663.68|15792.80|795.90|0.00|7106.76|8843.36|9639.26|15950.12|16746.02|2179.68| +2452278|80559|2452299|15487|72570|1224887|5651|25215|25264|1016460|2307|40271|42|24|13|4|75|148|75|50.58|95.09|82.72|927.75|6204.00|3793.50|7131.75|186.12|0.00|1425.75|6204.00|6390.12|7629.75|7815.87|2410.50| +2452278|80559|2452378|13525|72570|1224887|5651|25215|25264|1016460|2307|40271|51|9|14|3|62|148|2|37.21|94.88|83.49|22.78|166.98|74.42|189.76|2.27|53.43|94.88|113.55|115.82|208.43|210.70|39.13| +2452278|80559|2452310|8016|72570|1224887|5651|25215|25264|1016460|2307|40271|15|25|18|3|201|148|28|53.87|142.75|44.25|2758.00|1239.00|1508.36|3997.00|99.12|0.00|1398.88|1239.00|1338.12|2637.88|2737.00|-269.36| +2452278|80559|2452397|4131|72570|1224887|5651|25215|25264|1016460|2307|40271|54|6|6|2|65|148|75|69.71|71.10|62.56|640.50|4692.00|5228.25|5332.50|71.31|3800.52|1706.25|891.48|962.79|2597.73|2669.04|-4336.77| +2452278|80559|2452377|12385|72570|1224887|5651|25215|25264|1016460|2307|40271|48|7|1|1|48|148|71|64.64|85.32|29.00|3998.72|2059.00|4589.44|6057.72|144.13|0.00|1574.78|2059.00|2203.13|3633.78|3777.91|-2530.44| +2452278|80559|2452328|17436|72570|1224887|5651|25215|25264|1016460|2307|40271|15|7|10|3|59|148|98|73.78|110.67|52.01|5748.68|5096.98|7230.44|10845.66|407.75|0.00|650.72|5096.98|5504.73|5747.70|6155.45|-2133.46| +2452278|80559|2452299|5767|72570|1224887|5651|25215|25264|1016460|2307|40271|13|13|7|5|240|148|79|98.03|134.30|37.60|7639.30|2970.40|7744.37|10609.70|0.00|0.00|4561.46|2970.40|2970.40|7531.86|7531.86|-4773.97| +2452278|80559|2452290|2607|72570|1224887|5651|25215|25264|1016460|2307|40271|49|15|18|3|68|148|40|37.50|58.12|39.52|744.00|1580.80|1500.00|2324.80|11.85|1343.68|232.40|237.12|248.97|469.52|481.37|-1262.88| +2452278|80559|2452347|6765|72570|1224887|5651|25215|25264|1016460|2307|40271|15|12|1|1|14|148|53|81.67|113.52|28.38|4512.42|1504.14|4328.51|6016.56|0.00|0.00|1323.41|1504.14|1504.14|2827.55|2827.55|-2824.37| +2452026|73416|2452058|12235|52162|1509084|406|29375|92842|110073|1788|21110|15|29|19|3|177|149|66|67.22|190.23|57.06|8789.22|3765.96|4436.52|12555.18|188.29|0.00|2259.84|3765.96|3954.25|6025.80|6214.09|-670.56| +2452026|73416|2452103|4825|52162|1509084|406|29375|92842|110073|1788|21110|5|21|16|1|146|149|43|6.46|11.49|6.20|227.47|266.60|277.78|494.07|5.33|0.00|212.42|266.60|271.93|479.02|484.35|-11.18| +2452026|73416|2452132|9105|52162|1509084|406|29375|92842|110073|1788|21110|39|7|8|2|286|149|45|8.61|10.41|8.84|70.65|397.80|387.45|468.45|19.89|0.00|9.00|397.80|417.69|406.80|426.69|10.35| +2452026|73416|2452045|10727|52162|1509084|406|29375|92842|110073|1788|21110|21|13|3|2|228|149|33|96.61|208.67|98.07|3649.80|3236.31|3188.13|6886.11|64.72|0.00|2341.02|3236.31|3301.03|5577.33|5642.05|48.18| +2452026|73416|2452136|15803|52162|1509084|406|29375|92842|110073|1788|21110|57|3|18|1|124|149|26|31.72|80.56|63.64|439.92|1654.64|824.72|2094.56|33.09|0.00|230.36|1654.64|1687.73|1885.00|1918.09|829.92| +2452026|73416|2452095|17695|52162|1509084|406|29375|92842|110073|1788|21110|31|13|8|4|190|149|54|38.77|51.17|23.02|1520.10|1243.08|2093.58|2763.18|62.15|0.00|1049.76|1243.08|1305.23|2292.84|2354.99|-850.50| 
+2452026|73416|2452134|11739|52162|1509084|406|29375|92842|110073|1788|21110|1|23|13|4|255|149|42|98.65|155.86|34.28|5106.36|1439.76|4143.30|6546.12|55.43|647.89|3142.02|791.87|847.30|3933.89|3989.32|-3351.43| +2452026|73416|2452094|6223|52162|1509084|406|29375|92842|110073|1788|21110|37|5|17|1|262|149|88|7.73|20.79|1.66|1683.44|146.08|680.24|1829.52|1.46|0.00|36.08|146.08|147.54|182.16|183.62|-534.16| +2452026|73416|2452130|14365|52162|1509084|406|29375|92842|110073|1788|21110|33|11|5|4|93|149|93|95.37|143.05|111.57|2927.64|10376.01|8869.41|13303.65|76.78|6536.88|5321.46|3839.13|3915.91|9160.59|9237.37|-5030.28| +2452233|70530|2452295|9553|89648|385896|4991|48086|89648|385896|4991|48086|13|21|15|2|136|150|13|57.27|166.65|96.65|910.00|1256.45|744.51|2166.45|62.82|0.00|433.29|1256.45|1319.27|1689.74|1752.56|511.94| +2452233|70530|2452271|1937|89648|385896|4991|48086|89648|385896|4991|48086|39|23|7|4|221|150|69|52.09|105.22|21.04|5808.42|1451.76|3594.21|7260.18|14.51|0.00|2177.64|1451.76|1466.27|3629.40|3643.91|-2142.45| +2452233|70530|2452278|8217|89648|385896|4991|48086|89648|385896|4991|48086|3|29|8|1|264|150|91|44.81|94.99|47.49|4322.50|4321.59|4077.71|8644.09|172.86|0.00|1209.39|4321.59|4494.45|5530.98|5703.84|243.88| +2452233|70530|2452308|2547|89648|385896|4991|48086|89648|385896|4991|48086|25|25|3|5|290|150|43|50.36|88.13|53.75|1478.34|2311.25|2165.48|3789.59|184.90|0.00|264.88|2311.25|2496.15|2576.13|2761.03|145.77| +2452233|70530|2452300|12485|89648|385896|4991|48086|89648|385896|4991|48086|7|29|6|2|198|150|3|36.53|81.46|78.20|9.78|234.60|109.59|244.38|9.38|0.00|122.19|234.60|243.98|356.79|366.17|125.01| +2452233|70530|2452351|7499|89648|385896|4991|48086|89648|385896|4991|48086|43|27|16|3|87|150|1|30.72|90.00|48.60|41.40|48.60|30.72|90.00|3.88|0.00|20.70|48.60|52.48|69.30|73.18|17.88| +2452233|70530|2452273|17169|89648|385896|4991|48086|89648|385896|4991|48086|11|13|18|4|3|150|53|61.79|164.36|37.80|6707.68|2003.40|3274.87|8711.08|40.06|0.00|2700.35|2003.40|2043.46|4703.75|4743.81|-1271.47| +2452233|70530|2452305|7593|89648|385896|4991|48086|89648|385896|4991|48086|19|7|8|4|263|150|28|69.76|76.03|14.44|1724.52|404.32|1953.28|2128.84|12.33|157.68|425.60|246.64|258.97|672.24|684.57|-1706.64| +2452233|70530|2452351|307|89648|385896|4991|48086|89648|385896|4991|48086|49|17|13|2|141|150|37|98.89|142.40|68.35|2739.85|2528.95|3658.93|5268.80|0.00|252.89|1369.74|2276.06|2276.06|3645.80|3645.80|-1382.87| +2452233|70530|2452254|11821|89648|385896|4991|48086|89648|385896|4991|48086|27|15|2|1|260|150|46|66.21|78.12|36.71|1904.86|1688.66|3045.66|3593.52|38.50|726.12|1760.42|962.54|1001.04|2722.96|2761.46|-2083.12| +2452233|70530|2452246|3265|89648|385896|4991|48086|89648|385896|4991|48086|19|23|11|3|235|150|100|96.52|103.27|58.86|4441.00|5886.00|9652.00|10327.00|176.58|0.00|3304.00|5886.00|6062.58|9190.00|9366.58|-3766.00| +2452233|70530|2452285|12505|89648|385896|4991|48086|89648|385896|4991|48086|51|21|9|2|155|150|100|88.42|104.33|44.86|5947.00|4486.00|8842.00|10433.00|17.04|2781.32|3025.00|1704.68|1721.72|4729.68|4746.72|-7137.32| +2452233|70530|2452237|1865|89648|385896|4991|48086|89648|385896|4991|48086|43|23|12|4|199|150|95|26.98|32.37|22.33|953.80|2121.35|2563.10|3075.15|0.00|0.00|1075.40|2121.35|2121.35|3196.75|3196.75|-441.75| +2452375|40728|2452426|816|72525|810179|6680|5934|57988|1517660|6190|39891|42|12|11|1|291|151|79|21.79|47.28|25.05|1756.17|1978.95|1721.41|3735.12|118.73|0.00|1531.02|1978.95|2097.68|3509.97|3628.70|257.54| 
+2452375|40728|2452475|8107|72525|810179|6680|5934|57988|1517660|6190|39891|45|1|9|5|198|151|90|67.31|183.08|16.47|14994.90|1482.30|6057.90|16477.20|88.93|0.00|5436.90|1482.30|1571.23|6919.20|7008.13|-4575.60| +2452375|40728|2452417|4458|72525|810179|6680|5934|57988|1517660|6190|39891|36|1|1|1|91|151|24|30.92|57.82|5.78|1248.96|138.72|742.08|1387.68|0.00|0.00|652.08|138.72|138.72|790.80|790.80|-603.36| +2452375|40728|2452433|13675|72525|810179|6680|5934|57988|1517660|6190|39891|24|25|7|1|115|151|80|65.44|184.54|140.25|3543.20|11220.00|5235.20|14763.20|224.40|0.00|3838.40|11220.00|11444.40|15058.40|15282.80|5984.80| +2452375|40728|2452380|14877|72525|810179|6680|5934|57988|1517660|6190|39891|27|21|8|3|205|151|57|78.36|171.60|85.80|4890.60|4890.60|4466.52|9781.20|129.11|3276.70|4792.56|1613.90|1743.01|6406.46|6535.57|-2852.62| +2452375|40728|2452462|9379|72525|810179|6680|5934|57988|1517660|6190|39891|7|1|18|5|22|151|55|5.84|10.04|5.32|259.60|292.60|321.20|552.20|6.67|125.81|115.50|166.79|173.46|282.29|288.96|-154.41| +2452375|40728|2452414|14551|72525|810179|6680|5934|57988|1517660|6190|39891|36|30|15|2|29|151|94|84.50|92.10|74.60|1645.00|7012.40|7943.00|8657.40|210.37|0.00|1731.48|7012.40|7222.77|8743.88|8954.25|-930.60| +2452375|40728|2452470|6343|72525|810179|6680|5934|57988|1517660|6190|39891|49|18|3|2|201|151|36|71.63|73.77|24.34|1779.48|876.24|2578.68|2655.72|52.57|0.00|531.00|876.24|928.81|1407.24|1459.81|-1702.44| +2452375|40728|2452474|17185|72525|810179|6680|5934|57988|1517660|6190|39891|12|24|1|5|219|151|46|54.82|130.47|121.33|420.44|5581.18|2521.72|6001.62|446.49|0.00|0.00|5581.18|6027.67|5581.18|6027.67|3059.46| +2452375|40728|2452495|9439|72525|810179|6680|5934|57988|1517660|6190|39891|54|3|7|4|204|151|55|8.08|15.99|13.59|132.00|747.45|444.40|879.45|2.69|717.55|360.25|29.90|32.59|390.15|392.84|-414.50| +2452375|40728|2452425|5611|72525|810179|6680|5934|57988|1517660|6190|39891|19|18|18|2|119|151|47|45.18|108.88|97.99|511.83|4605.53|2123.46|5117.36|322.38|0.00|665.05|4605.53|4927.91|5270.58|5592.96|2482.07| +2452375|40728|2452429|7650|72525|810179|6680|5934|57988|1517660|6190|39891|48|18|14|4|264|151|99|31.19|69.24|24.92|4387.68|2467.08|3087.81|6854.76|148.02|0.00|2192.85|2467.08|2615.10|4659.93|4807.95|-620.73| +2452375|40728|2452495|4813|72525|810179|6680|5934|57988|1517660|6190|39891|30|27|6|5|27|151|71|40.69|79.34|43.63|2535.41|3097.73|2888.99|5633.14|30.97|0.00|1858.78|3097.73|3128.70|4956.51|4987.48|208.74| +2452208|62293|2452220|3535|38675|1029091|7060|29029|47359|1106649|6136|21305|39|13|7|2|194|152|11|86.87|88.60|79.74|97.46|877.14|955.57|974.60|35.08|0.00|380.05|877.14|912.22|1257.19|1292.27|-78.43| +2452208|62293|2452268|15229|38675|1029091|7060|29029|47359|1106649|6136|21305|21|25|12|4|86|152|36|68.13|198.25|19.82|6423.48|713.52|2452.68|7137.00|57.08|0.00|2569.32|713.52|770.60|3282.84|3339.92|-1739.16| +2452208|62293|2452268|11183|38675|1029091|7060|29029|47359|1106649|6136|21305|9|5|14|2|20|152|9|13.19|24.79|19.08|51.39|171.72|118.71|223.11|3.70|109.90|62.46|61.82|65.52|124.28|127.98|-56.89| +2452208|62293|2452220|12851|38675|1029091|7060|29029|47359|1106649|6136|21305|59|9|7|2|297|152|3|75.52|154.81|24.76|390.15|74.28|226.56|464.43|0.74|0.00|208.98|74.28|75.02|283.26|284.00|-152.28| +2452208|62293|2452328|12657|38675|1029091|7060|29029|47359|1106649|6136|21305|29|21|10|1|225|152|8|13.11|37.62|1.88|285.92|15.04|104.88|300.96|0.90|0.00|51.12|15.04|15.94|66.16|67.06|-89.84| 
+2452208|62293|2452277|14315|38675|1029091|7060|29029|47359|1106649|6136|21305|45|23|14|1|228|152|52|62.42|121.71|26.77|4936.88|1392.04|3245.84|6328.92|4.45|1169.31|2278.12|222.73|227.18|2500.85|2505.30|-3023.11| +2452208|62293|2452302|12603|38675|1029091|7060|29029|47359|1106649|6136|21305|17|17|16|1|203|152|63|89.64|105.77|44.42|3865.05|2798.46|5647.32|6663.51|97.94|839.53|2131.92|1958.93|2056.87|4090.85|4188.79|-3688.39| +2452208|62293|2452304|14355|38675|1029091|7060|29029|47359|1106649|6136|21305|47|9|16|4|45|152|37|80.84|152.78|116.11|1356.79|4296.07|2991.08|5652.86|343.68|0.00|2543.75|4296.07|4639.75|6839.82|7183.50|1304.99| +2452208|62293|2452253|11285|38675|1029091|7060|29029|47359|1106649|6136|21305|53|23|14|1|11|152|54|48.95|97.41|89.61|421.20|4838.94|2643.30|5260.14|0.00|0.00|104.76|4838.94|4838.94|4943.70|4943.70|2195.64| +2451412|72051|2451496|5240|68664|7349|4434|9166|80155|1521653|4421|1953|2|22|12|4|79|153|26|60.89|88.89|15.11|1918.28|392.86|1583.14|2311.14|0.00|0.00|69.16|392.86|392.86|462.02|462.02|-1190.28| +2451412|72051|2451478|7052|68664|7349|4434|9166|80155|1521653|4421|1953|10|10|6|1|96|153|7|72.05|173.64|90.29|583.45|632.03|504.35|1215.48|50.56|0.00|449.68|632.03|682.59|1081.71|1132.27|127.68| +2451412|72051|2451456|5173|68664|7349|4434|9166|80155|1521653|4421|1953|44|26|15|4|100|153|72|41.09|42.32|22.42|1432.80|1614.24|2958.48|3047.04|112.99|0.00|578.88|1614.24|1727.23|2193.12|2306.11|-1344.24| +2451412|72051|2451448|12070|68664|7349|4434|9166|80155|1521653|4421|1953|14|20|17|5|73|153|44|76.98|88.52|40.71|2103.64|1791.24|3387.12|3894.88|0.00|0.00|778.80|1791.24|1791.24|2570.04|2570.04|-1595.88| +2451412|72051|2451414|3019|68664|7349|4434|9166|80155|1521653|4421|1953|22|10|7|3|99|153|16|79.99|99.98|76.98|368.00|1231.68|1279.84|1599.68|36.95|0.00|735.84|1231.68|1268.63|1967.52|2004.47|-48.16| +2451412|72051|2451438|6460|68664|7349|4434|9166|80155|1521653|4421|1953|7|26|14|1|231|153|97|68.04|142.88|11.43|12750.65|1108.71|6599.88|13859.36|55.43|0.00|5404.84|1108.71|1164.14|6513.55|6568.98|-5491.17| +2451412|72051|2451465|14239|68664|7349|4434|9166|80155|1521653|4421|1953|56|25|7|1|115|153|24|62.85|74.16|51.91|534.00|1245.84|1508.40|1779.84|0.00|0.00|160.08|1245.84|1245.84|1405.92|1405.92|-262.56| +2451412|72051|2451460|5491|68664|7349|4434|9166|80155|1521653|4421|1953|13|14|12|3|64|153|20|42.90|101.67|100.65|20.40|2013.00|858.00|2033.40|0.00|865.59|162.60|1147.41|1147.41|1310.01|1310.01|289.41| +2451412|72051|2451530|12964|68664|7349|4434|9166|80155|1521653|4421|1953|26|8|16|2|38|153|84|20.67|56.22|5.05|4298.28|424.20|1736.28|4722.48|25.45|0.00|849.24|424.20|449.65|1273.44|1298.89|-1312.08| +2452610|34471|2452712|16497|50874|30210|746|29682|50874|30210|746|29682|48|18|11|1|128|154|81|53.20|84.58|34.67|4042.71|2808.27|4309.20|6850.98|140.41|0.00|1575.45|2808.27|2948.68|4383.72|4524.13|-1500.93| +2452610|34471|2452686|7302|50874|30210|746|29682|50874|30210|746|29682|12|13|5|3|211|154|61|98.25|183.72|181.88|112.24|11094.68|5993.25|11206.92|110.94|0.00|4258.41|11094.68|11205.62|15353.09|15464.03|5101.43| +2452610|34471|2452651|7|50874|30210|746|29682|50874|30210|746|29682|9|12|14|5|256|154|39|88.90|93.34|8.40|3312.66|327.60|3467.10|3640.26|22.93|0.00|655.20|327.60|350.53|982.80|1005.73|-3139.50| +2452610|34471|2452653|2581|50874|30210|746|29682|50874|30210|746|29682|12|12|16|1|126|154|62|83.11|162.89|48.86|7069.86|3029.32|5152.82|10099.18|30.29|0.00|1110.42|3029.32|3059.61|4139.74|4170.03|-2123.50| 
+2452610|34471|2452671|12897|50874|30210|746|29682|50874|30210|746|29682|37|12|13|2|64|154|26|18.81|49.65|5.95|1136.20|154.70|489.06|1290.90|1.29|89.72|141.96|64.98|66.27|206.94|208.23|-424.08| +2452610|34471|2452679|15558|50874|30210|746|29682|50874|30210|746|29682|6|25|10|5|110|154|56|46.81|74.89|13.48|3438.96|754.88|2621.36|4193.84|22.64|0.00|2096.64|754.88|777.52|2851.52|2874.16|-1866.48| +2452610|34471|2452722|17946|50874|30210|746|29682|50874|30210|746|29682|45|6|13|5|70|154|20|80.61|90.28|49.65|812.60|993.00|1612.20|1805.60|39.72|0.00|415.20|993.00|1032.72|1408.20|1447.92|-619.20| +2452610|34471|2452712|17922|50874|30210|746|29682|50874|30210|746|29682|12|25|20|2|214|154|87|5.18|12.12|6.18|516.78|537.66|450.66|1054.44|43.01|0.00|400.20|537.66|580.67|937.86|980.87|87.00| +2451796|65903|2451824|8735|60125|95037|1149|35661|83535|140531|2844|3356|23|17|13|5|67|155|70|62.89|116.34|22.10|6596.80|1547.00|4402.30|8143.80|46.41|0.00|814.10|1547.00|1593.41|2361.10|2407.51|-2855.30| +2451796|65903|2451806|12599|60125|95037|1149|35661|83535|140531|2844|3356|23|20|7|3|52|155|54|70.64|113.02|56.51|3051.54|3051.54|3814.56|6103.08|61.03|0.00|2257.74|3051.54|3112.57|5309.28|5370.31|-763.02| +2451796|65903|2451841|5756|60125|95037|1149|35661|83535|140531|2844|3356|59|26|11|5|282|155|83|23.34|32.20|30.91|107.07|2565.53|1937.22|2672.60|55.41|1641.93|1015.09|923.60|979.01|1938.69|1994.10|-1013.62| +2451796|65903|2451843|2870|60125|95037|1149|35661|83535|140531|2844|3356|35|7|20|3|119|155|61|76.26|189.88|94.94|5791.34|5791.34|4651.86|11582.68|57.91|0.00|3126.86|5791.34|5849.25|8918.20|8976.11|1139.48| +2451796|65903|2451872|6965|60125|95037|1149|35661|83535|140531|2844|3356|53|20|5|1|30|155|41|99.28|163.81|153.98|403.03|6313.18|4070.48|6716.21|63.13|0.00|2887.63|6313.18|6376.31|9200.81|9263.94|2242.70| +2451796|65903|2451797|7031|60125|95037|1149|35661|83535|140531|2844|3356|19|1|12|1|117|155|46|78.02|193.48|106.41|4005.22|4894.86|3588.92|8900.08|160.55|2887.96|2402.58|2006.90|2167.45|4409.48|4570.03|-1582.02| +2451796|65903|2451881|16877|60125|95037|1149|35661|83535|140531|2844|3356|47|8|16|5|91|155|54|57.15|80.01|28.80|2765.34|1555.20|3086.10|4320.54|62.20|0.00|1598.40|1555.20|1617.40|3153.60|3215.80|-1530.90| +2451796|65903|2451867|17407|60125|95037|1149|35661|83535|140531|2844|3356|59|1|15|1|147|155|73|78.43|207.83|89.36|8648.31|6523.28|5725.39|15171.59|130.46|0.00|2427.25|6523.28|6653.74|8950.53|9080.99|797.89| +2451796|65903|2451879|12955|60125|95037|1149|35661|83535|140531|2844|3356|23|19|18|4|87|155|86|41.78|113.64|7.95|9089.34|683.70|3593.08|9773.04|0.00|560.63|585.66|123.07|123.07|708.73|708.73|-3470.01| +2451796|65903|2451862|8335|60125|95037|1149|35661|83535|140531|2844|3356|2|23|12|2|102|155|42|15.59|28.84|26.53|97.02|1114.26|654.78|1211.28|22.28|0.00|11.76|1114.26|1136.54|1126.02|1148.30|459.48| +2451796|65903|2451816|902|60125|95037|1149|35661|83535|140531|2844|3356|32|25|19|5|91|155|21|96.90|200.58|26.07|3664.71|547.47|2034.90|4212.18|49.27|0.00|1600.62|547.47|596.74|2148.09|2197.36|-1487.43| +2451796|65903|2451859|173|60125|95037|1149|35661|83535|140531|2844|3356|59|26|5|4|243|155|59|19.14|44.78|8.95|2113.97|528.05|1129.26|2642.02|21.12|0.00|1082.65|528.05|549.17|1610.70|1631.82|-601.21| +2451796|65903|2451841|8009|60125|95037|1149|35661|83535|140531|2844|3356|49|26|13|2|191|155|86|83.85|123.25|113.39|847.96|9751.54|7211.10|10599.50|682.60|0.00|2331.46|9751.54|10434.14|12083.00|12765.60|2540.44| 
+2451796|65903|2451842|7238|60125|95037|1149|35661|83535|140531|2844|3356|53|8|10|3|96|155|99|4.86|7.24|0.50|667.26|49.50|481.14|716.76|0.99|0.00|113.85|49.50|50.49|163.35|164.34|-431.64| +2451796|65903|2451913|1610|60125|95037|1149|35661|83535|140531|2844|3356|55|11|17|3|226|155|64|63.71|186.03|33.48|9763.20|2142.72|4077.44|11905.92|85.70|0.00|1547.52|2142.72|2228.42|3690.24|3775.94|-1934.72| +2451796|65903|2451859|13415|60125|95037|1149|35661|83535|140531|2844|3356|44|25|3|3|223|155|51|81.46|188.17|3.76|9404.91|191.76|4154.46|9596.67|0.00|0.00|575.79|191.76|191.76|767.55|767.55|-3962.70| +2451175|59206|2451210|15955|72140|959897|7|564|38687|399450|1341|32698|1|14|18|5|112|156|100|53.50|110.74|35.43|7531.00|3543.00|5350.00|11074.00|283.44|0.00|2436.00|3543.00|3826.44|5979.00|6262.44|-1807.00| +2451175|59206|2451279|11270|72140|959897|7|564|38687|399450|1341|32698|56|4|14|3|89|156|22|13.66|20.76|1.45|424.82|31.90|300.52|456.72|0.00|0.00|73.04|31.90|31.90|104.94|104.94|-268.62| +2451175|59206|2451266|9817|72140|959897|7|564|38687|399450|1341|32698|7|26|4|4|284|156|40|49.11|52.05|15.09|1478.40|603.60|1964.40|2082.00|0.00|0.00|1020.00|603.60|603.60|1623.60|1623.60|-1360.80| +2451175|59206|2451193|718|72140|959897|7|564|38687|399450|1341|32698|49|2|12|2|212|156|20|62.90|140.89|42.26|1972.60|845.20|1258.00|2817.80|0.00|845.20|253.60|0.00|0.00|253.60|253.60|-1258.00| +2451175|59206|2451187|15824|72140|959897|7|564|38687|399450|1341|32698|40|14|12|1|253|156|25|91.61|131.00|37.99|2325.25|949.75|2290.25|3275.00|28.49|0.00|1572.00|949.75|978.24|2521.75|2550.24|-1340.50| +2451175|59206|2451176|15080|72140|959897|7|564|38687|399450|1341|32698|7|26|7|2|217|156|20|10.53|17.26|6.38|217.60|127.60|210.60|345.20|10.20|0.00|103.40|127.60|137.80|231.00|241.20|-83.00| +2451175|59206|2451257|7676|72140|959897|7|564|38687|399450|1341|32698|4|20|19|2|192|156|53|80.61|200.71|28.09|9148.86|1488.77|4272.33|10637.63|0.00|0.00|2553.01|1488.77|1488.77|4041.78|4041.78|-2783.56| +2451175|59206|2451290|1093|72140|959897|7|564|38687|399450|1341|32698|1|26|14|4|104|156|40|47.51|128.27|41.04|3489.20|1641.60|1900.40|5130.80|147.74|0.00|0.00|1641.60|1789.34|1641.60|1789.34|-258.80| +2451175|59206|2451240|15853|72140|959897|7|564|38687|399450|1341|32698|38|19|3|3|176|156|4|12.35|36.55|27.04|38.04|108.16|49.40|146.20|1.08|0.00|21.92|108.16|109.24|130.08|131.16|58.76| +2451175|59206|2451268|7442|72140|959897|7|564|38687|399450|1341|32698|26|1|5|2|149|156|77|96.80|118.09|0.00|9092.93|0.00|7453.60|9092.93|0.00|0.00|272.58|0.00|0.00|272.58|272.58|-7453.60| +2451175|59206|2451248|11563|72140|959897|7|564|38687|399450|1341|32698|56|13|12|2|112|156|64|12.92|38.63|24.33|915.20|1557.12|826.88|2472.32|0.00|0.00|865.28|1557.12|1557.12|2422.40|2422.40|730.24| +2452629|20222|2452703|16539|16928|335672|1616|46360|64837|833823|592|18936|15|7|3|2|121|157|19|20.60|21.83|1.74|381.71|33.06|391.40|414.77|2.82|1.65|16.53|31.41|34.23|47.94|50.76|-359.99| +2452629|20222|2452664|17223|16928|335672|1616|46360|64837|833823|592|18936|42|9|6|2|38|157|34|3.31|5.95|2.79|107.44|94.86|112.54|202.30|2.84|0.00|68.68|94.86|97.70|163.54|166.38|-17.68| +2452629|20222|2452657|7705|16928|335672|1616|46360|64837|833823|592|18936|48|19|19|5|21|157|81|20.93|25.74|21.87|313.47|1771.47|1695.33|2084.94|88.57|0.00|396.09|1771.47|1860.04|2167.56|2256.13|76.14| +2452629|20222|2452687|9433|16928|335672|1616|46360|64837|833823|592|18936|31|19|17|1|248|157|44|39.02|70.23|35.11|1545.28|1544.84|1716.88|3090.12|92.69|0.00|1544.84|1544.84|1637.53|3089.68|3182.37|-172.04| 
+2452629|20222|2452701|16872|16928|335672|1616|46360|64837|833823|592|18936|45|19|17|4|31|157|56|46.83|55.72|16.15|2215.92|904.40|2622.48|3120.32|36.17|0.00|655.20|904.40|940.57|1559.60|1595.77|-1718.08| +2452629|20222|2452749|16431|16928|335672|1616|46360|64837|833823|592|18936|37|7|16|3|273|157|78|58.55|165.11|3.30|12621.18|257.40|4566.90|12878.58|4.94|175.03|772.20|82.37|87.31|854.57|859.51|-4484.53| +2452629|20222|2452743|14509|16928|335672|1616|46360|64837|833823|592|18936|12|7|15|3|134|157|31|84.21|119.57|44.24|2335.23|1371.44|2610.51|3706.67|109.71|0.00|148.18|1371.44|1481.15|1519.62|1629.33|-1239.07| +2452629|20222|2452655|8103|16928|335672|1616|46360|64837|833823|592|18936|36|19|19|3|150|157|23|36.69|75.94|20.50|1275.12|471.50|843.87|1746.62|23.57|0.00|87.17|471.50|495.07|558.67|582.24|-372.37| +2452629|20222|2452728|16869|16928|335672|1616|46360|64837|833823|592|18936|18|18|19|4|9|157|7|54.69|92.42|10.16|575.82|71.12|382.83|646.94|0.71|0.00|45.22|71.12|71.83|116.34|117.05|-311.71| +2452629|20222|2452688|1161|16928|335672|1616|46360|64837|833823|592|18936|12|7|5|1|97|157|98|12.17|18.37|4.77|1332.80|467.46|1192.66|1800.26|9.34|0.00|575.26|467.46|476.80|1042.72|1052.06|-725.20| +2452629|20222|2452647|6264|16928|335672|1616|46360|64837|833823|592|18936|55|27|10|1|92|157|91|40.61|82.43|14.01|6226.22|1274.91|3695.51|7501.13|21.03|854.18|749.84|420.73|441.76|1170.57|1191.60|-3274.78| +2452629|20222|2452735|17499|16928|335672|1616|46360|64837|833823|592|18936|33|12|2|1|280|157|42|53.16|136.08|91.17|1886.22|3829.14|2232.72|5715.36|229.74|0.00|2114.28|3829.14|4058.88|5943.42|6173.16|1596.42| +2452629|20222|2452663|5041|16928|335672|1616|46360|64837|833823|592|18936|60|25|16|2|195|157|64|26.19|33.26|32.59|42.88|2085.76|1676.16|2128.64|187.71|0.00|170.24|2085.76|2273.47|2256.00|2443.71|409.60| +2451522|64963|2451562|10736|60623|1003215|3407|39199|76905|174988|1422|27614|44|19|17|4|54|158|38|47.63|71.92|19.41|1995.38|737.58|1809.94|2732.96|22.12|0.00|163.78|737.58|759.70|901.36|923.48|-1072.36| +2451522|64963|2451554|9424|60623|1003215|3407|39199|76905|174988|1422|27614|49|7|13|2|31|158|35|7.76|10.47|10.26|7.35|359.10|271.60|366.45|21.54|0.00|76.65|359.10|380.64|435.75|457.29|87.50| +2451522|64963|2451525|5992|60623|1003215|3407|39199|76905|174988|1422|27614|50|14|20|2|254|158|33|88.82|256.68|89.83|5506.05|2964.39|2931.06|8470.44|148.21|0.00|2879.91|2964.39|3112.60|5844.30|5992.51|33.33| +2451522|64963|2451636|2384|60623|1003215|3407|39199|76905|174988|1422|27614|44|19|17|2|264|158|51|87.46|118.94|49.95|3518.49|2547.45|4460.46|6065.94|203.79|0.00|667.08|2547.45|2751.24|3214.53|3418.32|-1913.01| +2451522|64963|2451594|7891|60623|1003215|3407|39199|76905|174988|1422|27614|32|2|14|4|61|158|86|46.85|60.43|9.66|4366.22|830.76|4029.10|5196.98|41.53|0.00|1506.72|830.76|872.29|2337.48|2379.01|-3198.34| +2451522|64963|2451579|10786|60623|1003215|3407|39199|76905|174988|1422|27614|50|28|1|2|146|158|97|21.14|55.59|4.44|4961.55|430.68|2050.58|5392.23|6.20|120.59|1616.99|310.09|316.29|1927.08|1933.28|-1740.49| +2451522|64963|2451581|17659|60623|1003215|3407|39199|76905|174988|1422|27614|19|22|11|3|55|158|45|46.79|109.02|2.18|4807.80|98.10|2105.55|4905.90|3.92|0.00|245.25|98.10|102.02|343.35|347.27|-2007.45| +2451522|64963|2451529|9688|60623|1003215|3407|39199|76905|174988|1422|27614|28|26|6|5|57|158|51|71.80|145.03|1.45|7322.58|73.95|3661.80|7396.53|2.21|0.00|2736.66|73.95|76.16|2810.61|2812.82|-3587.85| 
+2451522|64963|2451527|13120|60623|1003215|3407|39199|76905|174988|1422|27614|50|26|9|2|285|158|65|33.49|98.12|8.83|5803.85|573.95|2176.85|6377.80|22.95|0.00|2614.30|573.95|596.90|3188.25|3211.20|-1602.90| +2451522|64963|2451567|8050|60623|1003215|3407|39199|76905|174988|1422|27614|40|20|4|4|4|158|53|39.09|44.17|9.27|1849.70|491.31|2071.77|2341.01|4.12|78.60|1029.79|412.71|416.83|1442.50|1446.62|-1659.06| +2451522|64963|2451637|991|60623|1003215|3407|39199|76905|174988|1422|27614|26|26|11|3|178|158|86|30.65|85.82|21.45|5535.82|1844.70|2635.90|7380.52|129.12|0.00|1623.68|1844.70|1973.82|3468.38|3597.50|-791.20| +2452559|49265|2452617|6342|28340|316285|4677|22222|52173|310686|3751|36496|9|15|1|4|137|159|40|78.13|217.98|67.57|6016.40|2702.80|3125.20|8719.20|162.16|0.00|3487.60|2702.80|2864.96|6190.40|6352.56|-422.40| +2452559|49265|2452583|15741|28340|316285|4677|22222|52173|310686|3751|36496|21|30|19|3|244|159|74|25.05|71.64|62.32|689.68|4611.68|1853.70|5301.36|0.00|0.00|211.64|4611.68|4611.68|4823.32|4823.32|2757.98| +2452559|49265|2452642|11773|28340|316285|4677|22222|52173|310686|3751|36496|7|12|15|2|169|159|24|47.50|139.17|48.70|2171.28|1168.80|1140.00|3340.08|81.81|0.00|1669.92|1168.80|1250.61|2838.72|2920.53|28.80| +2452559|49265|2452608|5167|28340|316285|4677|22222|52173|310686|3751|36496|24|18|6|3|263|159|28|80.05|176.11|176.11|0.00|4931.08|2241.40|4931.08|98.62|0.00|394.24|4931.08|5029.70|5325.32|5423.94|2689.68| +2452559|49265|2452608|11259|28340|316285|4677|22222|52173|310686|3751|36496|51|21|16|1|116|159|80|1.46|2.51|1.83|54.40|146.40|116.80|200.80|0.57|127.36|9.60|19.04|19.61|28.64|29.21|-97.76| +2452559|49265|2452620|9553|28340|316285|4677|22222|52173|310686|3751|36496|19|1|12|3|274|159|86|69.18|145.96|32.11|9791.10|2761.46|5949.48|12552.56|248.53|0.00|5648.48|2761.46|3009.99|8409.94|8658.47|-3188.02| +2452559|49265|2452602|1938|28340|316285|4677|22222|52173|310686|3751|36496|49|1|4|2|3|159|8|58.91|141.38|4.24|1097.12|33.92|471.28|1131.04|0.00|8.48|282.72|25.44|25.44|308.16|308.16|-445.84| +2452559|49265|2452561|8217|28340|316285|4677|22222|52173|310686|3751|36496|39|9|13|1|66|159|28|75.24|122.64|95.65|755.72|2678.20|2106.72|3433.92|107.12|0.00|1476.44|2678.20|2785.32|4154.64|4261.76|571.48| +2452559|49265|2452675|2547|28340|316285|4677|22222|52173|310686|3751|36496|48|7|7|2|260|159|97|57.82|139.92|36.37|10044.35|3527.89|5608.54|13572.24|0.00|0.00|4614.29|3527.89|3527.89|8142.18|8142.18|-2080.65| +2452559|49265|2452626|12486|28340|316285|4677|22222|52173|310686|3751|36496|48|21|1|1|71|159|53|51.91|138.59|2.77|7198.46|146.81|2751.23|7345.27|2.93|0.00|3672.37|146.81|149.74|3819.18|3822.11|-2604.42| +2452559|49265|2452654|7500|28340|316285|4677|22222|52173|310686|3751|36496|51|9|4|3|219|159|52|62.80|142.55|89.80|2743.00|4669.60|3265.60|7412.60|271.30|793.83|518.44|3875.77|4147.07|4394.21|4665.51|610.17| +2452559|49265|2452601|17169|28340|316285|4677|22222|52173|310686|3751|36496|7|18|7|2|250|159|45|15.75|19.53|18.94|26.55|852.30|708.75|878.85|51.13|0.00|228.15|852.30|903.43|1080.45|1131.58|143.55| +2452559|49265|2452633|7593|28340|316285|4677|22222|52173|310686|3751|36496|57|1|16|3|53|159|62|79.11|109.17|6.55|6362.44|406.10|4904.82|6768.54|4.06|0.00|676.42|406.10|410.16|1082.52|1086.58|-4498.72| +2452559|49265|2452647|307|28340|316285|4677|22222|52173|310686|3751|36496|31|30|4|5|19|159|71|18.86|41.30|15.28|1847.42|1084.88|1339.06|2932.30|21.69|0.00|673.79|1084.88|1106.57|1758.67|1780.36|-254.18| 
+2451496|74442|2451579|10408|39288|1913042|3949|40903|39288|1913042|3949|40903|32|10|5|4|63|160|87|62.62|162.81|89.54|6374.49|7789.98|5447.94|14164.47|623.19|0.00|5099.07|7789.98|8413.17|12889.05|13512.24|2342.04| +2451496|74442|2451536|14569|39288|1913042|3949|40903|39288|1913042|3949|40903|10|19|14|3|255|160|9|96.64|225.17|139.60|770.13|1256.40|869.76|2026.53|42.08|414.61|101.25|841.79|883.87|943.04|985.12|-27.97| +2451496|74442|2451521|1124|39288|1913042|3949|40903|39288|1913042|3949|40903|40|1|11|1|271|160|48|69.30|198.89|95.46|4964.64|4582.08|3326.40|9546.72|229.10|0.00|3627.36|4582.08|4811.18|8209.44|8438.54|1255.68| +2451496|74442|2451577|10562|39288|1913042|3949|40903|39288|1913042|3949|40903|49|16|3|2|268|160|77|35.63|67.69|29.10|2971.43|2240.70|2743.51|5212.13|156.84|0.00|2397.01|2240.70|2397.54|4637.71|4794.55|-502.81| +2451496|74442|2451521|15745|39288|1913042|3949|40903|39288|1913042|3949|40903|34|28|2|4|216|160|59|31.01|38.76|2.32|2149.96|136.88|1829.59|2286.84|4.10|0.00|457.25|136.88|140.98|594.13|598.23|-1692.71| +2451496|74442|2451503|15139|39288|1913042|3949|40903|39288|1913042|3949|40903|14|28|19|1|133|160|48|82.26|200.71|108.38|4431.84|5202.24|3948.48|9634.08|104.04|0.00|2601.12|5202.24|5306.28|7803.36|7907.40|1253.76| +2451496|74442|2451550|15625|39288|1913042|3949|40903|39288|1913042|3949|40903|56|10|16|1|283|160|83|78.04|113.15|53.18|4977.51|4413.94|6477.32|9391.45|308.97|0.00|4319.32|4413.94|4722.91|8733.26|9042.23|-2063.38| +2451496|74442|2451568|15530|39288|1913042|3949|40903|39288|1913042|3949|40903|8|26|13|2|71|160|65|28.20|72.75|32.73|2601.30|2127.45|1833.00|4728.75|42.54|0.00|378.30|2127.45|2169.99|2505.75|2548.29|294.45| +2451496|74442|2451516|9802|39288|1913042|3949|40903|39288|1913042|3949|40903|52|7|14|4|4|160|69|19.76|30.03|6.30|1637.37|434.70|1363.44|2072.07|4.34|0.00|289.80|434.70|439.04|724.50|728.84|-928.74| +2451496|74442|2451554|4810|39288|1913042|3949|40903|39288|1913042|3949|40903|32|28|6|4|2|160|56|36.13|83.82|38.55|2535.12|2158.80|2023.28|4693.92|34.54|431.76|1407.84|1727.04|1761.58|3134.88|3169.42|-296.24| +2451496|74442|2451591|8593|39288|1913042|3949|40903|39288|1913042|3949|40903|50|14|20|2|201|160|20|44.35|81.60|15.50|1322.00|310.00|887.00|1632.00|18.60|0.00|65.20|310.00|328.60|375.20|393.80|-577.00| +2451496|74442|2451530|7382|39288|1913042|3949|40903|39288|1913042|3949|40903|16|16|16|1|23|160|99|75.98|106.37|49.99|5581.62|4949.01|7522.02|10530.63|296.94|0.00|3053.16|4949.01|5245.95|8002.17|8299.11|-2573.01| +2451496|74442|2451614|1532|39288|1913042|3949|40903|39288|1913042|3949|40903|58|10|15|1|160|160|5|77.72|90.93|13.63|386.50|68.15|388.60|454.65|0.00|0.00|9.05|68.15|68.15|77.20|77.20|-320.45| +2451962|19052|2452058|9943|96248|810654|6660|31913|69550|1565406|731|1584|21|9|6|5|31|161|49|14.78|27.63|6.07|1056.44|297.43|724.22|1353.87|26.76|0.00|53.90|297.43|324.19|351.33|378.09|-426.79| +2451962|19052|2452079|5403|96248|810654|6660|31913|69550|1565406|731|1584|53|29|13|1|129|161|9|98.99|103.93|28.06|682.83|252.54|890.91|935.37|12.95|108.59|355.41|143.95|156.90|499.36|512.31|-746.96| +2451962|19052|2451979|11709|96248|810654|6660|31913|69550|1565406|731|1584|3|1|3|2|44|161|83|97.22|111.80|69.31|3526.67|5752.73|8069.26|9279.40|345.16|0.00|3525.84|5752.73|6097.89|9278.57|9623.73|-2316.53| +2451962|19052|2452035|2031|96248|810654|6660|31913|69550|1565406|731|1584|57|3|15|1|64|161|89|67.69|201.71|108.92|8258.31|9693.88|6024.41|17952.19|581.63|0.00|5923.84|9693.88|10275.51|15617.72|16199.35|3669.47| 
+2451962|19052|2452011|3459|96248|810654|6660|31913|69550|1565406|731|1584|5|29|14|1|88|161|41|47.30|102.16|66.40|1466.16|2722.40|1939.30|4188.56|217.79|0.00|753.58|2722.40|2940.19|3475.98|3693.77|783.10| +2451962|19052|2452011|14247|96248|810654|6660|31913|69550|1565406|731|1584|35|15|1|1|204|161|67|34.00|100.30|80.24|1344.02|5376.08|2278.00|6720.10|376.32|0.00|402.67|5376.08|5752.40|5778.75|6155.07|3098.08| +2451962|19052|2451995|12069|96248|810654|6660|31913|69550|1565406|731|1584|7|9|13|4|15|161|28|51.13|114.53|25.19|2501.52|705.32|1431.64|3206.84|16.64|289.18|609.28|416.14|432.78|1025.42|1042.06|-1015.50| +2451962|19052|2452030|10615|96248|810654|6660|31913|69550|1565406|731|1584|9|13|19|3|140|161|36|92.80|116.00|22.04|3382.56|793.44|3340.80|4176.00|0.00|0.00|1837.44|793.44|793.44|2630.88|2630.88|-2547.36| +2451962|19052|2452039|12313|96248|810654|6660|31913|69550|1565406|731|1584|17|1|10|4|81|161|74|24.43|66.69|43.34|1727.90|3207.16|1807.82|4935.06|224.50|0.00|1924.00|3207.16|3431.66|5131.16|5355.66|1399.34| +2451962|19052|2452011|17675|96248|810654|6660|31913|69550|1565406|731|1584|9|27|3|5|244|161|36|35.16|80.16|73.74|231.12|2654.64|1265.76|2885.76|79.63|0.00|1154.16|2654.64|2734.27|3808.80|3888.43|1388.88| +2451962|19052|2452061|1173|96248|810654|6660|31913|69550|1565406|731|1584|23|7|9|1|63|161|65|11.84|32.20|9.33|1486.55|606.45|769.60|2093.00|18.19|0.00|334.75|606.45|624.64|941.20|959.39|-163.15| +2451962|19052|2451964|4775|96248|810654|6660|31913|69550|1565406|731|1584|9|29|15|5|105|161|36|51.29|98.47|41.35|2056.32|1488.60|1846.44|3544.92|59.54|0.00|921.60|1488.60|1548.14|2410.20|2469.74|-357.84| +2451962|19052|2452021|5503|96248|810654|6660|31913|69550|1565406|731|1584|49|13|6|5|184|161|26|28.25|41.52|28.23|345.54|733.98|734.50|1079.52|14.67|0.00|183.30|733.98|748.65|917.28|931.95|-0.52| +2451962|19052|2451980|7043|96248|810654|6660|31913|69550|1565406|731|1584|9|11|17|3|189|161|65|20.21|28.69|20.08|559.65|1305.20|1313.65|1864.85|0.00|0.00|745.55|1305.20|1305.20|2050.75|2050.75|-8.45| +2451962|19052|2451988|15833|96248|810654|6660|31913|69550|1565406|731|1584|51|9|14|1|235|161|77|68.67|153.13|22.96|10023.09|1767.92|5287.59|11791.01|141.43|0.00|2475.55|1767.92|1909.35|4243.47|4384.90|-3519.67| +2451806|38537|2451844|11264|82399|1584419|1992|4812|82399|1584419|1992|4812|47|8|13|1|239|162|80|93.06|222.41|106.75|9252.80|8540.00|7444.80|17792.80|422.73|85.40|3024.00|8454.60|8877.33|11478.60|11901.33|1009.80| +2451806|38537|2451877|4385|82399|1584419|1992|4812|82399|1584419|1992|4812|20|26|7|5|23|162|39|31.03|92.77|38.03|2134.86|1483.17|1210.17|3618.03|74.15|0.00|1230.06|1483.17|1557.32|2713.23|2787.38|273.00| +2451806|38537|2451830|16183|82399|1584419|1992|4812|82399|1584419|1992|4812|13|17|1|3|248|162|31|42.64|44.34|7.09|1154.75|219.79|1321.84|1374.54|5.53|81.32|137.33|138.47|144.00|275.80|281.33|-1183.37| +2451806|38537|2451906|9536|82399|1584419|1992|4812|82399|1584419|1992|4812|50|2|18|2|223|162|15|50.73|119.21|50.06|1037.25|750.90|760.95|1788.15|0.00|0.00|393.30|750.90|750.90|1144.20|1144.20|-10.05| +2451806|38537|2451884|8780|82399|1584419|1992|4812|82399|1584419|1992|4812|35|23|6|5|54|162|38|42.37|63.55|31.77|1207.64|1207.26|1610.06|2414.90|72.43|0.00|651.70|1207.26|1279.69|1858.96|1931.39|-402.80| +2451806|38537|2451911|14041|82399|1584419|1992|4812|82399|1584419|1992|4812|20|23|16|2|248|162|29|37.77|97.82|71.40|766.18|2070.60|1095.33|2836.78|125.89|496.94|765.89|1573.66|1699.55|2339.55|2465.44|478.33| 
+2451806|38537|2451858|8531|82399|1584419|1992|4812|82399|1584419|1992|4812|59|7|8|4|113|162|23|42.99|86.40|58.75|635.95|1351.25|988.77|1987.20|121.61|0.00|695.52|1351.25|1472.86|2046.77|2168.38|362.48| +2451806|38537|2451814|11669|82399|1584419|1992|4812|82399|1584419|1992|4812|37|11|13|2|124|162|6|73.06|78.90|75.74|18.96|454.44|438.36|473.40|31.81|0.00|99.36|454.44|486.25|553.80|585.61|16.08| +2451097|71478|2451102|16564|95885|1105012|6139|40866|99773|1811145|5400|527|44|26|8|2|213|163|81|72.46|81.87|67.95|1127.52|5503.95|5869.26|6631.47|55.03|0.00|1060.29|5503.95|5558.98|6564.24|6619.27|-365.31| +2451097|71478|2451150|9454|95885|1105012|6139|40866|99773|1811145|5400|527|1|2|13|1|102|163|83|81.59|235.79|153.26|6849.99|12720.58|6771.97|19570.57|10.17|11702.93|977.74|1017.65|1027.82|1995.39|2005.56|-5754.32| +2451097|71478|2451164|6313|95885|1105012|6139|40866|99773|1811145|5400|527|58|22|4|1|18|163|87|39.68|69.44|44.44|2175.00|3866.28|3452.16|6041.28|0.00|0.00|2536.92|3866.28|3866.28|6403.20|6403.20|414.12| +2451097|71478|2451192|7994|95885|1105012|6139|40866|99773|1811145|5400|527|13|25|12|3|204|163|70|46.21|109.05|42.52|4657.10|2976.40|3234.70|7633.50|208.34|0.00|2137.10|2976.40|3184.74|5113.50|5321.84|-258.30| +2451097|71478|2451152|6829|95885|1105012|6139|40866|99773|1811145|5400|527|46|25|19|3|204|163|52|7.47|10.08|9.07|52.52|471.64|388.44|524.16|28.29|0.00|204.36|471.64|499.93|676.00|704.29|83.20| +2451097|71478|2451198|16064|95885|1105012|6139|40866|99773|1811145|5400|527|8|10|7|3|159|163|43|4.81|7.84|3.92|168.56|168.56|206.83|337.12|8.42|0.00|0.00|168.56|176.98|168.56|176.98|-38.27| +2451097|71478|2451211|7880|95885|1105012|6139|40866|99773|1811145|5400|527|44|16|2|3|11|163|100|81.05|203.43|164.77|3866.00|16477.00|8105.00|20343.00|118.63|12522.52|3865.00|3954.48|4073.11|7819.48|7938.11|-4150.52| +2451097|71478|2451107|16046|95885|1105012|6139|40866|99773|1811145|5400|527|34|26|13|4|198|163|73|42.79|60.33|39.21|1541.76|2862.33|3123.67|4404.09|0.00|0.00|131.40|2862.33|2862.33|2993.73|2993.73|-261.34| +2451097|71478|2451164|9952|95885|1105012|6139|40866|99773|1811145|5400|527|7|25|5|2|79|163|67|97.87|133.10|75.86|3835.08|5082.62|6557.29|8917.70|457.43|0.00|89.11|5082.62|5540.05|5171.73|5629.16|-1474.67| +2451097|71478|2451176|560|95885|1105012|6139|40866|99773|1811145|5400|527|19|2|7|2|257|163|34|24.91|41.10|38.22|97.92|1299.48|846.94|1397.40|116.95|0.00|558.96|1299.48|1416.43|1858.44|1975.39|452.54| +2451097|71478|2451099|12799|95885|1105012|6139|40866|99773|1811145|5400|527|10|22|11|5|159|163|71|59.18|110.07|35.22|5314.35|2500.62|4201.78|7814.97|75.01|0.00|1718.91|2500.62|2575.63|4219.53|4294.54|-1701.16| +2451097|71478|2451185|5870|95885|1105012|6139|40866|99773|1811145|5400|527|37|20|16|1|213|163|62|14.96|32.16|5.46|1655.40|338.52|927.52|1993.92|16.92|0.00|338.52|338.52|355.44|677.04|693.96|-589.00| +2451097|71478|2451108|13594|95885|1105012|6139|40866|99773|1811145|5400|527|7|1|8|5|82|163|80|47.78|141.42|111.72|2376.00|8937.60|3822.40|11313.60|625.63|0.00|5316.80|8937.60|9563.23|14254.40|14880.03|5115.20| +2451097|71478|2451187|13898|95885|1105012|6139|40866|99773|1811145|5400|527|25|16|15|1|177|163|99|20.51|22.76|6.82|1578.06|675.18|2030.49|2253.24|2.56|546.89|788.04|128.29|130.85|916.33|918.89|-1902.20| +2451097|71478|2451205|10225|95885|1105012|6139|40866|99773|1811145|5400|527|8|10|14|5|46|163|68|34.11|57.30|44.12|896.24|3000.16|2319.48|3896.40|150.00|0.00|1597.32|3000.16|3150.16|4597.48|4747.48|680.68| 
+2452635|68960|2452658|4129|36298|1068565|832|32746|65983|93728|5547|33461|3|21|10|2|181|164|86|78.17|195.42|48.85|12605.02|4201.10|6722.62|16806.12|336.08|0.00|3192.32|4201.10|4537.18|7393.42|7729.50|-2521.52| +2452635|68960|2452668|7281|36298|1068565|832|32746|65983|93728|5547|33461|42|27|4|2|277|164|15|81.08|162.97|65.18|1466.85|977.70|1216.20|2444.55|2.73|703.94|806.70|273.76|276.49|1080.46|1083.19|-942.44| +2452635|68960|2452671|14628|36298|1068565|832|32746|65983|93728|5547|33461|31|21|16|1|26|164|86|90.38|234.08|11.70|19124.68|1006.20|7772.68|20130.88|40.24|0.00|2818.22|1006.20|1046.44|3824.42|3864.66|-6766.48| +2452635|68960|2452640|12861|36298|1068565|832|32746|65983|93728|5547|33461|12|25|17|4|11|164|5|57.70|152.32|56.35|479.85|281.75|288.50|761.60|14.08|0.00|83.75|281.75|295.83|365.50|379.58|-6.75| +2452635|68960|2452718|16398|36298|1068565|832|32746|65983|93728|5547|33461|15|12|1|4|104|164|8|16.59|28.20|25.09|24.88|200.72|132.72|225.60|13.32|34.12|103.76|166.60|179.92|270.36|283.68|33.88| +2452635|68960|2452684|14157|36298|1068565|832|32746|65983|93728|5547|33461|42|30|2|1|73|164|71|44.85|85.21|23.00|4416.91|1633.00|3184.35|6049.91|97.98|0.00|241.40|1633.00|1730.98|1874.40|1972.38|-1551.35| +2452635|68960|2452702|16417|36298|1068565|832|32746|65983|93728|5547|33461|19|19|17|4|181|164|1|28.81|65.39|35.31|30.08|35.31|28.81|65.39|3.17|0.00|28.11|35.31|38.48|63.42|66.59|6.50| +2452635|68960|2452666|1098|36298|1068565|832|32746|65983|93728|5547|33461|31|12|9|2|262|164|92|98.78|255.84|194.43|5649.72|17887.56|9087.76|23537.28|1073.25|0.00|11533.12|17887.56|18960.81|29420.68|30493.93|8799.80| +2452635|68960|2452646|2772|36298|1068565|832|32746|65983|93728|5547|33461|24|19|15|3|11|164|31|46.47|66.91|59.54|228.47|1845.74|1440.57|2074.21|55.37|0.00|20.46|1845.74|1901.11|1866.20|1921.57|405.17| +2452635|68960|2452644|72|36298|1068565|832|32746|65983|93728|5547|33461|12|21|16|1|266|164|11|67.23|69.24|22.15|517.99|243.65|739.53|761.64|9.21|112.07|243.65|131.58|140.79|375.23|384.44|-607.95| +2452635|68960|2452650|15603|36298|1068565|832|32746|65983|93728|5547|33461|54|30|20|3|293|164|37|42.50|57.37|24.66|1210.27|912.42|1572.50|2122.69|45.62|0.00|403.30|912.42|958.04|1315.72|1361.34|-660.08| +2452635|68960|2452727|16309|36298|1068565|832|32746|65983|93728|5547|33461|33|13|10|5|233|164|66|72.75|202.97|117.72|5626.50|7769.52|4801.50|13396.02|48.17|5360.96|4152.72|2408.56|2456.73|6561.28|6609.45|-2392.94| +2452635|68960|2452665|6249|36298|1068565|832|32746|65983|93728|5547|33461|43|27|3|5|34|164|31|71.60|189.74|108.15|2529.29|3352.65|2219.60|5881.94|201.15|0.00|1117.55|3352.65|3553.80|4470.20|4671.35|1133.05| +2452596|76372|2452632|5755|59032|1902956|2080|8757|65407|1231216|2482|19606|1|12|18|3|5|165|85|59.18|139.66|110.33|2493.05|9378.05|5030.30|11871.10|33.76|8534.02|5223.25|844.03|877.79|6067.28|6101.04|-4186.27| +2452596|76372|2452613|4506|59032|1902956|2080|8757|65407|1231216|2482|19606|12|7|12|3|21|165|42|63.81|178.02|69.42|4561.20|2915.64|2680.02|7476.84|116.62|0.00|1719.48|2915.64|3032.26|4635.12|4751.74|235.62| +2452596|76372|2452606|6063|59032|1902956|2080|8757|65407|1231216|2482|19606|15|30|5|5|202|165|83|26.85|39.46|35.90|295.48|2979.70|2228.55|3275.18|89.39|0.00|229.08|2979.70|3069.09|3208.78|3298.17|751.15| +2452596|76372|2452700|9721|59032|1902956|2080|8757|65407|1231216|2482|19606|57|6|7|3|232|165|27|89.25|261.50|44.45|5860.35|1200.15|2409.75|7060.50|84.01|0.00|847.26|1200.15|1284.16|2047.41|2131.42|-1209.60| 
+2452596|76372|2452682|10491|59032|1902956|2080|8757|65407|1231216|2482|19606|12|30|11|2|153|165|10|72.62|131.44|2.62|1288.20|26.20|726.20|1314.40|1.83|0.00|13.10|26.20|28.03|39.30|41.13|-700.00| +2452596|76372|2452689|1963|59032|1902956|2080|8757|65407|1231216|2482|19606|51|9|14|3|122|165|15|43.49|60.01|35.40|369.15|531.00|652.35|900.15|37.17|0.00|9.00|531.00|568.17|540.00|577.17|-121.35| +2452596|76372|2452662|11808|59032|1902956|2080|8757|65407|1231216|2482|19606|30|9|2|4|49|165|36|54.53|147.23|91.28|2014.20|3286.08|1963.08|5300.28|45.34|1018.68|1589.76|2267.40|2312.74|3857.16|3902.50|304.32| +2452596|76372|2452682|9925|59032|1902956|2080|8757|65407|1231216|2482|19606|48|18|1|1|98|165|18|93.19|111.82|101.75|181.26|1831.50|1677.42|2012.76|54.94|0.00|744.66|1831.50|1886.44|2576.16|2631.10|154.08| +2452596|76372|2452688|15717|59032|1902956|2080|8757|65407|1231216|2482|19606|31|15|13|3|210|165|41|76.91|155.35|124.28|1273.87|5095.48|3153.31|6369.35|6.11|4891.66|700.28|203.82|209.93|904.10|910.21|-2949.49| +2452596|76372|2452713|8131|59032|1902956|2080|8757|65407|1231216|2482|19606|25|1|19|2|175|165|23|64.41|151.36|49.94|2332.66|1148.62|1481.43|3481.28|6.20|1045.24|870.32|103.38|109.58|973.70|979.90|-1378.05| +2452596|76372|2452617|3570|59032|1902956|2080|8757|65407|1231216|2482|19606|42|24|13|4|225|165|60|31.11|78.39|19.59|3528.00|1175.40|1866.60|4703.40|23.50|0.00|2163.00|1175.40|1198.90|3338.40|3361.90|-691.20| +2452596|76372|2452685|3373|59032|1902956|2080|8757|65407|1231216|2482|19606|55|3|13|4|125|165|83|17.99|41.73|36.30|450.69|3012.90|1493.17|3463.59|90.38|0.00|103.75|3012.90|3103.28|3116.65|3207.03|1519.73| +2452596|76372|2452684|4737|59032|1902956|2080|8757|65407|1231216|2482|19606|60|6|9|2|95|165|5|25.70|62.19|10.57|258.10|52.85|128.50|310.95|4.75|0.00|62.15|52.85|57.60|115.00|119.75|-75.65| +2452596|76372|2452662|4263|59032|1902956|2080|8757|65407|1231216|2482|19606|60|1|19|5|71|165|41|80.95|152.99|148.40|188.19|6084.40|3318.95|6272.59|304.22|0.00|3010.63|6084.40|6388.62|9095.03|9399.25|2765.45| +2452596|76372|2452676|9618|59032|1902956|2080|8757|65407|1231216|2482|19606|3|12|13|2|71|165|20|40.97|55.30|38.71|331.80|774.20|819.40|1106.00|4.95|712.26|464.40|61.94|66.89|526.34|531.29|-757.46| +2452596|76372|2452609|12373|59032|1902956|2080|8757|65407|1231216|2482|19606|15|13|16|2|131|165|22|66.88|71.56|1.43|1542.86|31.46|1471.36|1574.32|1.57|0.00|393.58|31.46|33.03|425.04|426.61|-1439.90| +2452223|69732|2452254|9707|70307|470143|2332|32968|52448|905632|7182|27418|49|15|1|4|257|166|85|43.25|111.15|8.89|8692.10|755.65|3676.25|9447.75|6.65|672.52|566.10|83.13|89.78|649.23|655.88|-3593.12| +2452223|69732|2452232|8171|70307|470143|2332|32968|52448|905632|7182|27418|15|19|12|4|226|166|91|65.68|162.22|71.37|8267.35|6494.67|5976.88|14762.02|519.57|0.00|4871.23|6494.67|7014.24|11365.90|11885.47|517.79| +2452223|69732|2452231|279|70307|470143|2332|32968|52448|905632|7182|27418|53|7|11|3|288|166|66|45.12|60.91|15.83|2975.28|1044.78|2977.92|4020.06|83.58|0.00|321.42|1044.78|1128.36|1366.20|1449.78|-1933.14| +2452223|69732|2452314|727|70307|470143|2332|32968|52448|905632|7182|27418|29|23|9|1|183|166|91|92.65|178.81|112.65|6020.56|10251.15|8431.15|16271.71|615.06|0.00|3904.81|10251.15|10866.21|14155.96|14771.02|1820.00| +2452223|69732|2452233|1845|70307|470143|2332|32968|52448|905632|7182|27418|17|13|2|2|285|166|79|34.27|67.16|55.07|955.11|4350.53|2707.33|5305.64|87.01|0.00|2121.94|4350.53|4437.54|6472.47|6559.48|1643.20| 
+2452223|69732|2452235|11761|70307|470143|2332|32968|52448|905632|7182|27418|57|17|19|4|189|166|30|53.83|69.44|64.57|146.10|1937.10|1614.90|2083.20|19.37|0.00|333.30|1937.10|1956.47|2270.40|2289.77|322.20| +2452223|69732|2452334|4707|70307|470143|2332|32968|52448|905632|7182|27418|27|27|4|2|157|166|18|84.41|239.72|196.57|776.70|3538.26|1519.38|4314.96|247.67|0.00|1380.78|3538.26|3785.93|4919.04|5166.71|2018.88| +2452223|69732|2452299|3599|70307|470143|2332|32968|52448|905632|7182|27418|1|29|2|1|13|166|14|78.87|94.64|43.53|715.54|609.42|1104.18|1324.96|54.84|0.00|251.72|609.42|664.26|861.14|915.98|-494.76| +2451066|45529|2451107|3241|31257|909296|2535|33374|31377|1287773|2691|35883|8|10|7|3|39|167|68|35.57|43.39|21.26|1504.84|1445.68|2418.76|2950.52|130.11|0.00|294.44|1445.68|1575.79|1740.12|1870.23|-973.08| +2451066|45529|2451124|8605|31257|909296|2535|33374|31377|1287773|2691|35883|25|26|5|3|24|167|6|37.90|74.28|31.94|254.04|191.64|227.40|445.68|0.38|172.47|35.64|19.17|19.55|54.81|55.19|-208.23| +2451066|45529|2451172|3092|31257|909296|2535|33374|31377|1287773|2691|35883|14|13|19|3|260|167|99|70.19|142.48|39.89|10156.41|3949.11|6948.81|14105.52|236.94|0.00|5218.29|3949.11|4186.05|9167.40|9404.34|-2999.70| +2451066|45529|2451149|8834|31257|909296|2535|33374|31377|1287773|2691|35883|44|2|11|5|89|167|20|87.86|250.40|177.78|1452.40|3555.60|1757.20|5008.00|71.11|0.00|1802.80|3555.60|3626.71|5358.40|5429.51|1798.40| +2451066|45529|2451079|17228|31257|909296|2535|33374|31377|1287773|2691|35883|55|4|11|1|184|167|4|2.59|4.76|4.04|2.88|16.16|10.36|19.04|1.13|0.00|3.96|16.16|17.29|20.12|21.25|5.80| +2451066|45529|2451122|9988|31257|909296|2535|33374|31377|1287773|2691|35883|19|25|6|4|292|167|11|88.71|91.37|62.13|321.64|683.43|975.81|1005.07|3.55|594.58|90.42|88.85|92.40|179.27|182.82|-886.96| +2451066|45529|2451131|16633|31257|909296|2535|33374|31377|1287773|2691|35883|1|20|11|1|34|167|28|8.20|10.66|3.19|209.16|89.32|229.60|298.48|0.89|0.00|131.32|89.32|90.21|220.64|221.53|-140.28| +2451066|45529|2451119|16057|31257|909296|2535|33374|31377|1287773|2691|35883|28|7|10|4|195|167|18|67.22|179.47|21.53|2842.92|387.54|1209.96|3230.46|2.17|170.51|1421.28|217.03|219.20|1638.31|1640.48|-992.93| +2451066|45529|2451180|13754|31257|909296|2535|33374|31377|1287773|2691|35883|43|7|13|5|174|167|31|85.30|243.10|143.42|3090.08|4446.02|2644.30|7536.10|177.84|0.00|753.61|4446.02|4623.86|5199.63|5377.47|1801.72| +2452393|60887|2452493|13623|98729|295328|2680|37232|19656|1603256|4888|27040|9|15|1|2|130|168|91|67.72|79.90|57.52|2036.58|5234.32|6162.52|7270.90|106.25|3716.36|944.58|1517.96|1624.21|2462.54|2568.79|-4644.56| +2452393|60887|2452511|6972|98729|295328|2680|37232|19656|1603256|4888|27040|31|18|9|2|242|168|47|88.71|198.71|149.03|2334.96|7004.41|4169.37|9339.37|70.04|0.00|4295.80|7004.41|7074.45|11300.21|11370.25|2835.04| +2452393|60887|2452508|5191|98729|295328|2680|37232|19656|1603256|4888|27040|6|1|6|4|219|168|14|44.86|118.87|27.34|1281.42|382.76|628.04|1664.18|2.06|348.31|49.84|34.45|36.51|84.29|86.35|-593.59| +2452393|60887|2452459|11161|98729|295328|2680|37232|19656|1603256|4888|27040|24|3|12|5|175|168|13|55.30|128.84|82.45|603.07|1071.85|718.90|1674.92|10.71|0.00|385.19|1071.85|1082.56|1457.04|1467.75|352.95| +2452393|60887|2452396|8517|98729|295328|2680|37232|19656|1603256|4888|27040|3|7|19|1|183|168|100|51.39|90.96|40.02|5094.00|4002.00|5139.00|9096.00|40.02|0.00|4548.00|4002.00|4042.02|8550.00|8590.02|-1137.00| 
+2452393|60887|2452477|2670|98729|295328|2680|37232|19656|1603256|4888|27040|54|13|17|2|37|168|6|7.12|20.50|14.76|34.44|88.56|42.72|123.00|7.97|0.00|2.46|88.56|96.53|91.02|98.99|45.84| +2452393|60887|2452503|5853|98729|295328|2680|37232|19656|1603256|4888|27040|9|12|4|5|23|168|39|26.11|49.08|46.62|95.94|1818.18|1018.29|1914.12|109.09|0.00|344.37|1818.18|1927.27|2162.55|2271.64|799.89| +2452393|60887|2452497|10737|98729|295328|2680|37232|19656|1603256|4888|27040|45|15|16|1|106|168|22|5.33|12.09|3.86|181.06|84.92|117.26|265.98|3.39|0.00|13.20|84.92|88.31|98.12|101.51|-32.34| +2452393|60887|2452437|9426|98729|295328|2680|37232|19656|1603256|4888|27040|36|21|14|2|147|168|70|56.78|122.64|74.81|3348.10|5236.70|3974.60|8584.80|471.30|0.00|1716.40|5236.70|5708.00|6953.10|7424.40|1262.10| +2452393|60887|2452397|5994|98729|295328|2680|37232|19656|1603256|4888|27040|54|15|6|4|272|168|30|85.86|106.46|99.00|223.80|2970.00|2575.80|3193.80|178.20|0.00|1277.40|2970.00|3148.20|4247.40|4425.60|394.20| +2452393|60887|2452502|2385|98729|295328|2680|37232|19656|1603256|4888|27040|19|13|1|3|155|168|45|75.12|184.04|115.94|3064.50|5217.30|3380.40|8281.80|4.17|5008.60|828.00|208.70|212.87|1036.70|1040.87|-3171.70| +2452393|60887|2452497|7891|98729|295328|2680|37232|19656|1603256|4888|27040|9|21|4|5|126|168|64|37.47|95.54|3.82|5870.08|244.48|2398.08|6114.56|4.88|0.00|2751.36|244.48|249.36|2995.84|3000.72|-2153.60| +2450975|26287|2450988|4250|61908|1113854|5447|33393|34300|1637354|5325|34471|16|13|15|3|211|169|76|52.16|56.85|44.34|950.76|3369.84|3964.16|4320.60|202.19|0.00|1123.28|3369.84|3572.03|4493.12|4695.31|-594.32| diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/DefaultHBaseKeyFactory.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/DefaultHBaseKeyFactory.java index 5731e45..12c5377 100644 --- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/DefaultHBaseKeyFactory.java +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/DefaultHBaseKeyFactory.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hive.hbase; +import java.io.IOException; +import java.util.Properties; + import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.lazy.LazyFactory; import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; @@ -26,9 +29,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import java.io.IOException; -import java.util.Properties; - public class DefaultHBaseKeyFactory extends AbstractHBaseKeyFactory implements HBaseKeyFactory { protected LazySimpleSerDe.SerDeParameters serdeParams; diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java index ca2f40e..aedd843 100644 --- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDe.java @@ -53,6 +53,7 @@ public static final String HBASE_COMPOSITE_KEY_CLASS = "hbase.composite.key.class"; public static final String HBASE_COMPOSITE_KEY_TYPES = "hbase.composite.key.types"; public static final String HBASE_COMPOSITE_KEY_FACTORY = "hbase.composite.key.factory"; + public static final String HBASE_STRUCT_SERIALIZER_CLASS = "hbase.struct.serialization.class"; public static final String HBASE_SCAN_CACHE = "hbase.scan.cache"; public static final String HBASE_SCAN_CACHEBLOCKS = "hbase.scan.cacheblock"; public static final String HBASE_SCAN_BATCH = "hbase.scan.batch"; @@ -98,7 
+99,7 @@ public void initialize(Configuration conf, Properties tbl) cachedHBaseRow = new LazyHBaseRow( (LazySimpleStructObjectInspector) cachedObjectInspector, - serdeParams.getKeyIndex(), serdeParams.getKeyFactory()); + serdeParams.getKeyIndex(), serdeParams.getKeyFactory(), serdeParams.getValueFactories()); serializer = new HBaseRowSerializer(serdeParams); diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeHelper.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeHelper.java index 25a9cfc..9f2f02f 100644 --- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeHelper.java +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeHelper.java @@ -41,6 +41,10 @@ import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator; import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyMapObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.util.StringUtils; @@ -371,6 +375,19 @@ public static Schema getSchemaFromFS(String schemaFSUrl, Configuration conf) } /** + * Create the {@link LazyObjectBase lazy field} + * */ + public static LazyObjectBase createLazyField(ColumnMapping[] columnMappings, int fieldID, + ObjectInspector inspector) { + ColumnMapping colMap = columnMappings[fieldID]; + if (colMap.getQualifierName() == null && !colMap.isHbaseRowKey()) { + // a column family + return new LazyHBaseCellMap((LazyMapObjectInspector) inspector); + } + return LazyFactory.createLazyObject(inspector, colMap.getBinaryStorage().get(0)); + } + + /** * Auto-generates the key struct for composite keys * * @param compositeKeyParts map of composite key part name to its type. 
Usually this would be diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeParameters.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeParameters.java index 8878eb5..9efa494 100644 --- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeParameters.java +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/HBaseSerDeParameters.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.hbase.struct.AvroHBaseValueFactory; import org.apache.hadoop.hive.hbase.struct.DefaultHBaseValueFactory; import org.apache.hadoop.hive.hbase.struct.HBaseValueFactory; +import org.apache.hadoop.hive.hbase.struct.StructHBaseValueFactory; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils; @@ -204,11 +205,21 @@ private static HBaseKeyFactory createKeyFactory(Configuration job, Properties tb for (int i = 0; i < columnMappings.size(); i++) { String serType = getSerializationType(conf, tbl, columnMappings.getColumnsMapping()[i]); - if (serType != null && serType.equals(AVRO_SERIALIZATION_TYPE)) { + if (AVRO_SERIALIZATION_TYPE.equals(serType)) { Schema schema = getSchema(conf, tbl, columnMappings.getColumnsMapping()[i]); - valueFactories.add(new AvroHBaseValueFactory(schema)); + valueFactories.add(new AvroHBaseValueFactory(i, schema)); + } else if (STRUCT_SERIALIZATION_TYPE.equals(serType)) { + String structValueClassName = tbl.getProperty(HBaseSerDe.HBASE_STRUCT_SERIALIZER_CLASS); + + if (structValueClassName == null) { + throw new IllegalArgumentException(HBaseSerDe.HBASE_STRUCT_SERIALIZER_CLASS + + " must be set for hbase columns of type [" + STRUCT_SERIALIZATION_TYPE + "]"); + } + + Class structValueClass = job.getClassByName(structValueClassName); + valueFactories.add(new StructHBaseValueFactory(i, structValueClass)); } else { - valueFactories.add(new DefaultHBaseValueFactory()); + valueFactories.add(new DefaultHBaseValueFactory(i)); } } } catch (Exception e) { diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/LazyHBaseRow.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/LazyHBaseRow.java index 3e8b8fd..6ac8423 100644 --- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/LazyHBaseRow.java +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/LazyHBaseRow.java @@ -20,15 +20,15 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hive.hbase.ColumnMappings.ColumnMapping; +import org.apache.hadoop.hive.hbase.struct.HBaseValueFactory; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; -import org.apache.hadoop.hive.serde2.lazy.LazyFactory; import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; import org.apache.hadoop.hive.serde2.lazy.LazyStruct; -import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyMapObjectInspector; import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructField; @@ -47,18 +47,21 @@ private final int iKey; private final HBaseKeyFactory keyFactory; + private final List valueFactories; public LazyHBaseRow(LazySimpleStructObjectInspector oi) { - this(oi, -1, null); + this(oi, -1, null, null); } /** * Construct a LazyHBaseRow object with the ObjectInspector. 
*/ - public LazyHBaseRow(LazySimpleStructObjectInspector oi, int iKey, HBaseKeyFactory keyFactory) { + public LazyHBaseRow(LazySimpleStructObjectInspector oi, int iKey, HBaseKeyFactory keyFactory, + List valueFactories) { super(oi); this.iKey = iKey; this.keyFactory = keyFactory; + this.valueFactories = valueFactories; } /** @@ -76,13 +79,14 @@ protected LazyObjectBase createLazyField(int fieldID, StructField fieldRef) thro if (fieldID == iKey) { return keyFactory.createKey(fieldRef.getFieldObjectInspector()); } - ColumnMapping colMap = columnsMapping[fieldID]; - if (colMap.qualifierName == null && !colMap.hbaseRowKey) { - // a column family - return new LazyHBaseCellMap((LazyMapObjectInspector) fieldRef.getFieldObjectInspector()); + + if (valueFactories != null) { + return valueFactories.get(fieldID).createValueObject(fieldRef.getFieldObjectInspector()); } - return LazyFactory.createLazyObject(fieldRef.getFieldObjectInspector(), - colMap.binaryStorage.get(0)); + + // fallback to default + return HBaseSerDeHelper.createLazyField(columnsMapping, fieldID, + fieldRef.getFieldObjectInspector()); } /** diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/AvroHBaseValueFactory.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/AvroHBaseValueFactory.java index c341c0a..a2ba827 100644 --- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/AvroHBaseValueFactory.java +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/AvroHBaseValueFactory.java @@ -48,7 +48,8 @@ * * @param schema the associated {@link Schema schema} * */ - public AvroHBaseValueFactory(Schema schema) { + public AvroHBaseValueFactory(int fieldID, Schema schema) { + super(fieldID); this.schema = schema; } diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/DefaultHBaseValueFactory.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/DefaultHBaseValueFactory.java index ac2cb57..f7a425d 100644 --- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/DefaultHBaseValueFactory.java +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/DefaultHBaseValueFactory.java @@ -21,9 +21,12 @@ import java.util.Properties; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.hbase.ColumnMappings; +import org.apache.hadoop.hive.hbase.HBaseSerDeHelper; import org.apache.hadoop.hive.hbase.HBaseSerDeParameters; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructField; @@ -35,15 +38,23 @@ public class DefaultHBaseValueFactory implements HBaseValueFactory{ protected LazySimpleSerDe.SerDeParameters serdeParams; + protected ColumnMappings columnMappings; protected HBaseSerDeParameters hbaseParams; protected Properties properties; protected Configuration conf; + private int fieldID; + + public DefaultHBaseValueFactory(int fieldID) { + this.fieldID = fieldID; + } + @Override public void init(HBaseSerDeParameters hbaseParams, Configuration conf, Properties properties) throws SerDeException { this.hbaseParams = hbaseParams; this.serdeParams = hbaseParams.getSerdeParams(); + this.columnMappings = hbaseParams.getColumnMappings(); this.properties = properties; this.conf = conf; } @@ -55,6 +66,11 @@ public ObjectInspector 
createValueObjectInspector(TypeInfo type) 1, serdeParams.getNullSequence(), serdeParams.isEscaped(), serdeParams.getEscapeChar()); } + @Override + public LazyObjectBase createValueObject(ObjectInspector inspector) throws SerDeException { + return HBaseSerDeHelper.createLazyField(columnMappings.getColumnsMapping(), fieldID, inspector); + } + @Override public byte[] serializeValue(Object object, StructField field) throws IOException { diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java new file mode 100644 index 0000000..8fba79b --- /dev/null +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseStructValue.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.hbase.struct; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyObject; +import org.apache.hadoop.hive.serde2.lazy.LazyStruct; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; + +/** + * This is an extension of LazyStruct. All value structs should extend this class and override the + * {@link LazyStruct#getField(int)} method where fieldID corresponds to the ID of a value in the + * value structure. + *

+ * For example, for a value structure "/part1/part2/part3", part1 will have an id
+ * 0, part2 will have an id 1 and part3 will have an id 2. Custom
+ * implementations of getField(fieldID) should return the value corresponding to that fieldID. So,
+ * for the above example, the value returned for getField(0) should be part1,
+ * getField(1) should be part2 and getField(2) should be part3.
+ *
+ * All implementations are expected to have a constructor of the form:
+ *
+ * <pre>
+ * MyCustomStructObject(LazySimpleStructObjectInspector oi, Properties props, Configuration conf, ColumnMapping colMap)
+ * </pre>
+ * */ +public class HBaseStructValue extends LazyStruct { + + /** + * The column family name + */ + protected String familyName; + + /** + * The column qualifier name + */ + protected String qualifierName; + + public HBaseStructValue(LazySimpleStructObjectInspector oi) { + super(oi); + } + + /** + * Set the row data for this LazyStruct. + * + * @see LazyObject#init(ByteArrayRef, int, int) + * + * @param familyName The column family name + * @param qualifierName The column qualifier name + */ + public void init(ByteArrayRef bytes, int start, int length, String familyName, + String qualifierName) { + init(bytes, start, length); + this.familyName = familyName; + this.qualifierName = qualifierName; + } + + @Override + public ArrayList<Object> getFieldsAsList() { + ArrayList<Object> allFields = new ArrayList<Object>(); + + List<? extends StructField> fields = oi.getAllStructFieldRefs(); + + for (int i = 0; i < fields.size(); i++) { + allFields.add(getField(i)); + } + + return allFields; + } + + /** + * Create and initialize a {@link LazyObject} with the given bytes for the given fieldID. + * + * @param fieldID field for which the object is to be created + * @param bytes value with which the object is to be initialized + * @return initialized {@link LazyObject} + * */ + public LazyObject<? extends ObjectInspector> toLazyObject(int fieldID, byte[] bytes) { + ObjectInspector fieldOI = oi.getAllStructFieldRefs().get(fieldID).getFieldObjectInspector(); + + LazyObject<? extends ObjectInspector> lazyObject = LazyFactory.createLazyObject(fieldOI); + + ByteArrayRef ref = new ByteArrayRef(); + + ref.setData(bytes); + + // initialize the lazy object + lazyObject.init(ref, 0, ref.getData().length); + + return lazyObject; + } +} \ No newline at end of file diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java index 8722af0..3fead1e 100644 --- a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/HBaseValueFactory.java @@ -22,8 +22,10 @@ import java.util.Properties; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.hbase.HBaseKeyFactory; import org.apache.hadoop.hive.hbase.HBaseSerDeParameters; import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; @@ -53,6 +55,13 @@ void init(HBaseSerDeParameters hbaseParam, Configuration conf, Properties proper ObjectInspector createValueObjectInspector(TypeInfo type) throws SerDeException; /** + * Create a custom lazy object for the HBase value + * + * @param inspector OI created by {@link HBaseKeyFactory#createKeyObjectInspector} + */ + LazyObjectBase createValueObject(ObjectInspector inspector) throws SerDeException; + + /** * Serialize the given hive object * * @param object the object to be serialized diff --git a/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/StructHBaseValueFactory.java b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/StructHBaseValueFactory.java new file mode 100644 index 0000000..e467787 --- /dev/null +++ b/hbase-handler/src/java/org/apache/hadoop/hive/hbase/struct/StructHBaseValueFactory.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.hbase.struct; + +import java.lang.reflect.Constructor; +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.hbase.ColumnMappings.ColumnMapping; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; + +/** + * Implementation of {@link HBaseValueFactory} to consume a custom struct + * */ +public class StructHBaseValueFactory<T extends HBaseStructValue> extends DefaultHBaseValueFactory { + + private final int fieldID; + private final Constructor constructor; + + public StructHBaseValueFactory(int fieldID, Class<?> structValueClass) throws Exception { + super(fieldID); + this.fieldID = fieldID; + this.constructor = + structValueClass.getDeclaredConstructor(LazySimpleStructObjectInspector.class, + Properties.class, Configuration.class, ColumnMapping.class); + } + + @Override + public LazyObjectBase createValueObject(ObjectInspector inspector) throws SerDeException { + try { + return (T) constructor.newInstance(inspector, properties, hbaseParams.getBaseConfiguration(), + hbaseParams.getColumnMappings().getColumnsMapping()[fieldID]); + } catch (Exception e) { + throw new SerDeException(e); + } + } +} \ No newline at end of file diff --git a/hbase-handler/src/test/org/apache/hadoop/hive/hbase/HBaseTestStructSerializer.java b/hbase-handler/src/test/org/apache/hadoop/hive/hbase/HBaseTestStructSerializer.java new file mode 100644 index 0000000..73d1903 --- /dev/null +++ b/hbase-handler/src/test/org/apache/hadoop/hive/hbase/HBaseTestStructSerializer.java @@ -0,0 +1,75 @@ +package org.apache.hadoop.hive.hbase; + +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hive.hbase.ColumnMappings.ColumnMapping; +import org.apache.hadoop.hive.hbase.struct.HBaseStructValue; +import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyObject; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; + +/** + * Test-specific implementation of {@link org.apache.hadoop.hive.serde2.lazy.LazyStruct} + */ +public class HBaseTestStructSerializer extends HBaseStructValue { + + protected byte[] bytes; + protected String bytesAsString; + protected Properties tbl; + protected Configuration conf; + protected ColumnMapping colMapping; + protected String testValue; + + public HBaseTestStructSerializer(LazySimpleStructObjectInspector oi, Properties tbl, + Configuration
conf, ColumnMapping colMapping) { + super(oi); + this.tbl = tbl; + this.conf = conf; + this.colMapping = colMapping; + } + + @Override + public void init(ByteArrayRef bytes, int start, int length) { + this.bytes = bytes.getData(); + } + + @Override + public Object getField(int fieldID) { + if (bytesAsString == null) { + bytesAsString = Bytes.toString(bytes).trim(); + } + + // Pick the character corresponding to the field id and convert it to a byte array + byte[] fieldBytes = new byte[] { (byte) bytesAsString.charAt(fieldID) }; + + return toLazyObject(fieldID, fieldBytes); + } + + /** + * Create and initialize a {@link LazyObject} with the given bytes for the given fieldID. + * + * @param fieldID field for which the object is to be created + * @param bytes value with which the object is to be initialized + * + * @return initialized {@link LazyObject} + * */ + @Override + public LazyObject<? extends ObjectInspector> toLazyObject(int fieldID, byte[] bytes) { + ObjectInspector fieldOI = oi.getAllStructFieldRefs().get(fieldID).getFieldObjectInspector(); + + LazyObject<? extends ObjectInspector> lazyObject = LazyFactory.createLazyObject(fieldOI); + + ByteArrayRef ref = new ByteArrayRef(); + + ref.setData(bytes); + + // initialize the lazy object + lazyObject.init(ref, 0, ref.getData().length); + + return lazyObject; + } +} \ No newline at end of file diff --git a/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseSerDe.java b/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseSerDe.java index 241818a..42b2444 100644 --- a/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseSerDe.java +++ b/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseSerDe.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Properties; +import junit.framework.Assert; import junit.framework.TestCase; import org.apache.avro.Schema; @@ -61,6 +62,7 @@ import org.apache.hadoop.hive.serde2.io.ShortWritable; import org.apache.hadoop.hive.serde2.lazy.LazyPrimitive; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.apache.hadoop.hive.serde2.lazy.LazyStruct; import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.io.BooleanWritable; @@ -135,6 +137,27 @@ " ]\n" + "}"; + private static final String EXPECTED_DESERIALIZED_AVRO_STRING = + "{\"key\":\"test-row1\",\"cola_avro\":{\"arecord\":{\"int1\":42,\"boolean1\":true," + + "\"long1\":42432234234}}}"; + + private static final String EXPECTED_DESERIALIZED_AVRO_STRING_2 = + "{\"key\":\"test-row1\"," + + "\"cola_avro\":{\"employeename\":\"Avro Employee1\"," + + "\"employeeid\":11111,\"age\":25,\"gender\":\"FEMALE\"," + + "\"contactinfo\":{\"address\":[{\"address1\":\"Avro First Address1\",\"address2\":" + + "\"Avro Second Address1\",\"city\":\"Avro City1\",\"zipcode\":123456,\"county\":" + + "{0:{\"areacode\":999,\"number\":1234567890}},\"aliases\":null,\"metadata\":" + + "{\"testkey\":\"testvalue\"}},{\"address1\":\"Avro First Address1\",\"address2\":" + + "\"Avro Second Address1\",\"city\":\"Avro City1\",\"zipcode\":123456,\"county\":" + + "{0:{\"areacode\":999,\"number\":1234567890}},\"aliases\":null,\"metadata\":" + + "{\"testkey\":\"testvalue\"}}],\"homephone\":{\"areacode\":999,\"number\":1234567890}," + + "\"officephone\":{\"areacode\":999,\"number\":1234455555}}}}"; + + private static final String EXPECTED_DESERIALIZED_AVRO_STRING_3 = + "{\"key\":\"test-row1\",\"cola_avro\":{\"arecord\":{\"int1\":42,\"string1\":\"test\"," + +
"\"boolean1\":true,\"long1\":42432234234}}}"; + /** * Test the default behavior of the Lazy family of objects and object inspectors. */ @@ -1047,7 +1070,8 @@ public void testHBaseSerDeWithAvroSchemaInline() throws SerDeException, IOExcept Properties tbl = createPropertiesForHiveAvroSchemaInline(); serDe.initialize(conf, tbl); - deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData); + deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData, + EXPECTED_DESERIALIZED_AVRO_STRING); } private Properties createPropertiesForHiveAvroSchemaInline() { @@ -1092,7 +1116,8 @@ public void testHBaseSerDeWithForwardEvolvedSchema() throws SerDeException, IOEx Properties tbl = createPropertiesForHiveAvroForwardEvolvedSchema(); serDe.initialize(conf, tbl); - deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData); + deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData, + EXPECTED_DESERIALIZED_AVRO_STRING_3); } private Properties createPropertiesForHiveAvroForwardEvolvedSchema() { @@ -1136,7 +1161,8 @@ public void testHBaseSerDeWithBackwardEvolvedSchema() throws SerDeException, IOE Properties tbl = createPropertiesForHiveAvroBackwardEvolvedSchema(); serDe.initialize(conf, tbl); - deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData); + deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData, + EXPECTED_DESERIALIZED_AVRO_STRING); } private Properties createPropertiesForHiveAvroBackwardEvolvedSchema() { @@ -1185,7 +1211,8 @@ public void testHBaseSerDeWithAvroSerClass() throws SerDeException, IOException Properties tbl = createPropertiesForHiveAvroSerClass(); serDe.initialize(conf, tbl); - deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData); + deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData, + EXPECTED_DESERIALIZED_AVRO_STRING_2); } private Properties createPropertiesForHiveAvroSerClass() { @@ -1243,7 +1270,8 @@ public void testHBaseSerDeWithAvroSchemaUrl() throws SerDeException, IOException Properties tbl = createPropertiesForHiveAvroSchemaUrl(onHDFS); serDe.initialize(conf, tbl); - deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData); + deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData, + EXPECTED_DESERIALIZED_AVRO_STRING); } finally { // Teardown the cluster if (miniDfs != null) { @@ -1298,7 +1326,8 @@ public void testHBaseSerDeWithAvroExternalSchema() throws SerDeException, IOExce Properties tbl = createPropertiesForHiveAvroExternalSchema(); serDe.initialize(conf, tbl); - deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData); + deserializeAndSerializeHiveAvro(serDe, r, p, expectedFieldsData, + EXPECTED_DESERIALIZED_AVRO_STRING_2); } private Properties createPropertiesForHiveAvroExternalSchema() { @@ -1389,8 +1418,87 @@ private Properties createPropertiesForHiveAvroColumnFamilyMap() { return tbl; } + public void testHBaseSerDeCustomStructValue() throws IOException, SerDeException { + + byte[] cfa = "cola".getBytes(); + byte[] qualStruct = "struct".getBytes(); + + TestStruct testStruct = new TestStruct("A", "B", "C", false, (byte) 0); + byte[] key = testStruct.getBytes(); + // Data + List kvs = new ArrayList(); + + byte[] testData = testStruct.getBytes(); + kvs.add(new KeyValue(key, cfa, qualStruct, testData)); + + Result r = new Result(kvs); + byte[] putKey = testStruct.getBytesWithDelimiters(); + + Put p = new Put(putKey); + + // Post serialization, separators are automatically inserted between different fields in the + // struct. Currently there is not way to disable that. 
So the workaround here is to pad the + // data with the separator bytes before creating a "Put" object + p.add(new KeyValue(putKey, cfa, qualStruct, Bytes.padTail(testData, 2))); + + // Create, initialize, and test the SerDe + HBaseSerDe serDe = new HBaseSerDe(); + Configuration conf = new Configuration(); + Properties tbl = createPropertiesForValueStruct(); + serDe.initialize(conf, tbl); + + deserializeAndSerializeHBaseValueStruct(serDe, r, p); + + } + + private Properties createPropertiesForValueStruct() { + Properties tbl = new Properties(); + tbl.setProperty("cola.struct.serialization.type", "struct"); + tbl.setProperty("cola.struct.test.value", "test value"); + tbl.setProperty(HBaseSerDe.HBASE_STRUCT_SERIALIZER_CLASS, + "org.apache.hadoop.hive.hbase.HBaseTestStructSerializer"); + tbl.setProperty(serdeConstants.LIST_COLUMNS, "key,astring"); + tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, + "struct<col1:string,col2:string,col3:string>,struct<col1:string,col2:string,col3:string>"); + tbl.setProperty(HBaseSerDe.HBASE_COLUMNS_MAPPING, ":key,cola:struct"); + tbl.setProperty(HBaseSerDe.HBASE_COMPOSITE_KEY_CLASS, + "org.apache.hadoop.hive.hbase.HBaseTestCompositeKey"); + return tbl; + } + + private void deserializeAndSerializeHBaseValueStruct(HBaseSerDe serDe, Result r, Put p) + throws SerDeException, IOException { + StructObjectInspector soi = (StructObjectInspector) serDe.getObjectInspector(); + + List<? extends StructField> fieldRefs = soi.getAllStructFieldRefs(); + + Object row = serDe.deserialize(new ResultWritable(r)); + + Object fieldData = null; + for (int j = 0; j < fieldRefs.size(); j++) { + fieldData = soi.getStructFieldData(row, fieldRefs.get(j)); + assertNotNull(fieldData); + if (fieldData instanceof LazyStruct) { + assertEquals(((LazyStruct) fieldData).getField(0).toString(), "A"); + assertEquals(((LazyStruct) fieldData).getField(1).toString(), "B"); + assertEquals(((LazyStruct) fieldData).getField(2).toString(), "C"); + } else { + Assert.fail("fieldData should be an instance of LazyStruct"); + } + } + + assertEquals( + "{\"key\":{\"col1\":\"A\",\"col2\":\"B\",\"col3\":\"C\"},\"astring\":{\"col1\":\"A\",\"col2\":\"B\",\"col3\":\"C\"}}", + SerDeUtils.getJSONString(row, soi)); + + // Now serialize + Put put = ((PutWritable) serDe.serialize(row, soi)).getPut(); + + assertEquals("Serialized put:", p.toString(), put.toString()); + } + private void deserializeAndSerializeHiveAvro(HBaseSerDe serDe, Result r, Put p, - Object[] expectedFieldsData) + Object[] expectedFieldsData, String expectedDeserializedAvroString) throws SerDeException, IOException { StructObjectInspector soi = (StructObjectInspector) serDe.getObjectInspector(); @@ -1403,6 +1511,8 @@ private void deserializeAndSerializeHiveAvro(HBaseSerDe serDe, Result r, Put p, assertNotNull(fieldData); assertEquals(expectedFieldsData[j], fieldData.toString().trim()); } + + assertEquals(expectedDeserializedAvroString, SerDeUtils.getJSONString(row, soi)); // Now serialize Put put = ((PutWritable) serDe.serialize(row, soi)).getPut(); diff --git a/hbase-handler/src/test/queries/positive/hbase_ppd_join.q b/hbase-handler/src/test/queries/positive/hbase_ppd_join.q new file mode 100644 index 0000000..2436c19 --- /dev/null +++ b/hbase-handler/src/test/queries/positive/hbase_ppd_join.q @@ -0,0 +1,61 @@ +--create hive hbase table 1 +drop table if exists hive1_tbl_data_hbase1; +drop table if exists hive1_tbl_data_hbase2; +drop view if exists hive1_view_data_hbase1; +drop view if exists hive1_view_data_hbase2; + +CREATE TABLE hive1_tbl_data_hbase1 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp,
PK_COLUM string) +STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' +WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" +) +; + +--create hive view for the above hive table 1 +CREATE VIEW hive1_view_data_hbase1 +AS +SELECT * +FROM hive1_tbl_data_hbase1 +WHERE PK_COLUM >='4000-00000' +and PK_COLUM <='4000-99999' +AND COL_UPDATED_DATE IS NOT NULL +; + + +--load data to hive table 1 +insert into table hive1_tbl_data_hbase1 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','4000-10000' from src where key = 100; + +--create hive hbase table 2 +CREATE TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) +STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' +WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" +) +; + +--create hive view for the above hive hbase table 2 +CREATE VIEW hive1_view_data_hbase2 +AS +SELECT * +FROM hive1_tbl_data_hbase2 +where COL_UPDATED_DATE IS NOT NULL +; + + +--load data to hive hbase table 2 +insert into table hive1_tbl_data_hbase2 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','00001' from src where key = 100; +; + +set hive.optimize.ppd = true; +set hive.auto.convert.join=false; + +-- do not return value without fix + +select x.FIRST_NAME1, x.EMAIL1 from ( +select p.COLUMN_FN as first_name1, a.EMAIL as email1 from hive1_view_data_hbase2 p inner join hive1_view_data_hbase1 a on p.COLUMID =a.COLUMID) x; + +set hive.auto.convert.join=true; + +-- return value with/without fix + +select x.FIRST_NAME1, x.EMAIL1 from ( +select p.COLUMN_FN as first_name1, a.EMAIL as email1 from hive1_view_data_hbase2 p inner join hive1_view_data_hbase1 a on p.COLUMID =a.COLUMID) x; + diff --git a/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out b/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out new file mode 100644 index 0000000..83a3015 --- /dev/null +++ b/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out @@ -0,0 +1,154 @@ +PREHOOK: query: --create hive hbase table 1 +drop table if exists hive1_tbl_data_hbase1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: --create hive hbase table 1 +drop table if exists hive1_tbl_data_hbase1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists hive1_tbl_data_hbase2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists hive1_tbl_data_hbase2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop view if exists hive1_view_data_hbase1 +PREHOOK: type: DROPVIEW +POSTHOOK: query: drop view if exists hive1_view_data_hbase1 +POSTHOOK: type: DROPVIEW +PREHOOK: query: drop view if exists hive1_view_data_hbase2 +PREHOOK: type: DROPVIEW +POSTHOOK: query: drop view if exists hive1_view_data_hbase2 +POSTHOOK: type: DROPVIEW +PREHOOK: query: CREATE TABLE hive1_tbl_data_hbase1 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) +STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' +WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@hive1_tbl_data_hbase1 +POSTHOOK: query: CREATE TABLE hive1_tbl_data_hbase1 (COLUMID string,COLUMN_FN string,COLUMN_LN 
string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) +STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' +WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@hive1_tbl_data_hbase1 +PREHOOK: query: --create hive view for the above hive table 1 +CREATE VIEW hive1_view_data_hbase1 +AS +SELECT * +FROM hive1_tbl_data_hbase1 +WHERE PK_COLUM >='4000-00000' +and PK_COLUM <='4000-99999' +AND COL_UPDATED_DATE IS NOT NULL +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@hive1_tbl_data_hbase1 +PREHOOK: Output: database:default +PREHOOK: Output: default@hive1_view_data_hbase1 +POSTHOOK: query: --create hive view for the above hive table 1 +CREATE VIEW hive1_view_data_hbase1 +AS +SELECT * +FROM hive1_tbl_data_hbase1 +WHERE PK_COLUM >='4000-00000' +and PK_COLUM <='4000-99999' +AND COL_UPDATED_DATE IS NOT NULL +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@hive1_tbl_data_hbase1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@hive1_view_data_hbase1 +PREHOOK: query: --load data to hive table 1 +insert into table hive1_tbl_data_hbase1 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','4000-10000' from src where key = 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@hive1_tbl_data_hbase1 +POSTHOOK: query: --load data to hive table 1 +insert into table hive1_tbl_data_hbase1 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','4000-10000' from src where key = 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@hive1_tbl_data_hbase1 +PREHOOK: query: --create hive hbase table 2 +CREATE TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) +STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' +WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@hive1_tbl_data_hbase2 +POSTHOOK: query: --create hive hbase table 2 +CREATE TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) +STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' +WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@hive1_tbl_data_hbase2 +PREHOOK: query: --create hive view for the above hive hbase table 2 +CREATE VIEW hive1_view_data_hbase2 +AS +SELECT * +FROM hive1_tbl_data_hbase2 +where COL_UPDATED_DATE IS NOT NULL +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@hive1_tbl_data_hbase2 +PREHOOK: Output: database:default +PREHOOK: Output: default@hive1_view_data_hbase2 +POSTHOOK: query: --create hive view for the above hive hbase table 2 +CREATE VIEW hive1_view_data_hbase2 +AS +SELECT * +FROM hive1_tbl_data_hbase2 +where COL_UPDATED_DATE IS NOT NULL +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@hive1_tbl_data_hbase2 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@hive1_view_data_hbase2 +PREHOOK: query: --load data to hive hbase table 2 +insert into table 
hive1_tbl_data_hbase2 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','00001' from src where key = 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@hive1_tbl_data_hbase2 +POSTHOOK: query: --load data to hive hbase table 2 +insert into table hive1_tbl_data_hbase2 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','00001' from src where key = 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@hive1_tbl_data_hbase2 +PREHOOK: query: -- do not return value without fix + +select x.FIRST_NAME1, x.EMAIL1 from ( +select p.COLUMN_FN as first_name1, a.EMAIL as email1 from hive1_view_data_hbase2 p inner join hive1_view_data_hbase1 a on p.COLUMID =a.COLUMID) x +PREHOOK: type: QUERY +PREHOOK: Input: default@hive1_tbl_data_hbase1 +PREHOOK: Input: default@hive1_tbl_data_hbase2 +PREHOOK: Input: default@hive1_view_data_hbase1 +PREHOOK: Input: default@hive1_view_data_hbase2 +#### A masked pattern was here #### +POSTHOOK: query: -- do not return value without fix + +select x.FIRST_NAME1, x.EMAIL1 from ( +select p.COLUMN_FN as first_name1, a.EMAIL as email1 from hive1_view_data_hbase2 p inner join hive1_view_data_hbase1 a on p.COLUMID =a.COLUMID) x +POSTHOOK: type: QUERY +POSTHOOK: Input: default@hive1_tbl_data_hbase1 +POSTHOOK: Input: default@hive1_tbl_data_hbase2 +POSTHOOK: Input: default@hive1_view_data_hbase1 +POSTHOOK: Input: default@hive1_view_data_hbase2 +#### A masked pattern was here #### +john john@hotmail.com +PREHOOK: query: -- return value with/without fix + +select x.FIRST_NAME1, x.EMAIL1 from ( +select p.COLUMN_FN as first_name1, a.EMAIL as email1 from hive1_view_data_hbase2 p inner join hive1_view_data_hbase1 a on p.COLUMID =a.COLUMID) x +PREHOOK: type: QUERY +PREHOOK: Input: default@hive1_tbl_data_hbase1 +PREHOOK: Input: default@hive1_tbl_data_hbase2 +PREHOOK: Input: default@hive1_view_data_hbase1 +PREHOOK: Input: default@hive1_view_data_hbase2 +#### A masked pattern was here #### +POSTHOOK: query: -- return value with/without fix + +select x.FIRST_NAME1, x.EMAIL1 from ( +select p.COLUMN_FN as first_name1, a.EMAIL as email1 from hive1_view_data_hbase2 p inner join hive1_view_data_hbase1 a on p.COLUMID =a.COLUMID) x +POSTHOOK: type: QUERY +POSTHOOK: Input: default@hive1_tbl_data_hbase1 +POSTHOOK: Input: default@hive1_tbl_data_hbase2 +POSTHOOK: Input: default@hive1_view_data_hbase1 +POSTHOOK: Input: default@hive1_view_data_hbase2 +#### A masked pattern was here #### +john john@hotmail.com diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java index 6c54c05..11d0743 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/cli/SemanticAnalysis/HCatSemanticAnalyzer.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -104,6 +105,7 @@ public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) case HiveParser.TOK_ALTERVIEW_DROPPARTS: case HiveParser.TOK_ALTERVIEW_PROPERTIES: case HiveParser.TOK_ALTERVIEW_RENAME: + case 
HiveParser.TOK_ALTERVIEW: case HiveParser.TOK_CREATEVIEW: case HiveParser.TOK_DROPVIEW: @@ -359,7 +361,7 @@ protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext cntxt, Hive hive AlterTableDesc alterTable = work.getAlterTblDesc(); if (alterTable != null) { Table table = hive.getTable(SessionState.get().getCurrentDatabase(), - alterTable.getOldName(), false); + Utilities.getDbTableName(alterTable.getOldName())[1], false); Partition part = null; if (alterTable.getPartSpec() != null) { diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java index c2a0f5f..5f9379b 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java @@ -90,6 +90,7 @@ public static void setup() throws Exception { File workDir = handleWorkDir(); conf.set("yarn.scheduler.capacity.root.queues", "default"); conf.set("yarn.scheduler.capacity.root.default.capacity", "100"); + conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem"); fs = FileSystem.get(conf); System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath()); diff --git a/hcatalog/hcatalog-pig-adapter/pom.xml b/hcatalog/hcatalog-pig-adapter/pom.xml index 4d2ca51..7589efb 100644 --- a/hcatalog/hcatalog-pig-adapter/pom.xml +++ b/hcatalog/hcatalog-pig-adapter/pom.xml @@ -53,6 +53,13 @@ tests test + + org.apache.hive + hive-exec + ${project.version} + test-jar + test + diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java index ee3e750..5eabba1 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoader.java @@ -28,10 +28,12 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Set; import org.apache.commons.io.FileUtils; @@ -42,6 +44,8 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.CommandNeedRetryException; import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.io.IOConstants; +import org.apache.hadoop.hive.ql.io.StorageFormats; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; @@ -69,12 +73,16 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.junit.Assert.*; +import static org.junit.Assume.assumeTrue; +@RunWith(Parameterized.class) public class TestHCatLoader { private static final Logger LOG = LoggerFactory.getLogger(TestHCatLoader.class); private static final String TEST_DATA_DIR = HCatUtil.makePathASafeFileName(System.getProperty("java.io.tmpdir") + @@ -91,9 +99,30 @@ private Driver driver; private Map> basicInputData; - protected String storageFormat() { - return "RCFILE 
tblproperties('hcat.isd'='org.apache.hive.hcatalog.rcfile.RCFileInputDriver'," + - "'hcat.osd'='org.apache.hive.hcatalog.rcfile.RCFileOutputDriver')"; + private static final Map> DISABLED_STORAGE_FORMATS = + new HashMap>() {{ + put(IOConstants.AVRO, new HashSet() {{ + add("testReadDataBasic"); + add("testReadPartitionedBasic"); + add("testProjectionsBasic"); + add("testSchemaLoadPrimitiveTypes"); + }}); + put(IOConstants.PARQUETFILE, new HashSet() {{ + add("testReadDataBasic"); + add("testReadPartitionedBasic"); + add("testProjectionsBasic"); + }}); + }}; + + private String storageFormat; + + @Parameterized.Parameters + public static Collection generateParameters() { + return StorageFormats.names(); + } + + public TestHCatLoader(String storageFormat) { + this.storageFormat = storageFormat; } private void dropTable(String tablename) throws IOException, CommandNeedRetryException { @@ -105,7 +134,7 @@ static void dropTable(String tablename, Driver driver) throws IOException, Comma } private void createTable(String tablename, String schema, String partitionedBy) throws IOException, CommandNeedRetryException { - createTable(tablename, schema, partitionedBy, driver, storageFormat()); + createTable(tablename, schema, partitionedBy, driver, storageFormat); } static void createTable(String tablename, String schema, String partitionedBy, Driver driver, String storageFormat) @@ -209,17 +238,18 @@ public void setup() throws Exception { server.registerQuery("D = load '" + COMPLEX_FILE_NAME + "' as (name:chararray, studentid:int, contact:tuple(phno:chararray,email:chararray), currently_registered_courses:bag{innertup:tuple(course:chararray)}, current_grades:map[ ] , phnos :bag{innertup:tuple(phno:chararray,type:chararray)});", ++i); server.registerQuery("store D into '" + COMPLEX_TABLE + "' using org.apache.hive.hcatalog.pig.HCatStorer();", ++i); server.executeBatch(); - } @After public void tearDown() throws Exception { try { - dropTable(BASIC_TABLE); - dropTable(COMPLEX_TABLE); - dropTable(PARTITIONED_TABLE); - dropTable(SPECIFIC_SIZE_TABLE); - dropTable(AllTypesTable.ALL_PRIMITIVE_TYPES_TABLE); + if (driver != null) { + dropTable(BASIC_TABLE); + dropTable(COMPLEX_TABLE); + dropTable(PARTITIONED_TABLE); + dropTable(SPECIFIC_SIZE_TABLE); + dropTable(AllTypesTable.ALL_PRIMITIVE_TYPES_TABLE); + } } finally { FileUtils.deleteDirectory(new File(TEST_DATA_DIR)); } @@ -227,6 +257,7 @@ public void tearDown() throws Exception { @Test public void testSchemaLoadBasic() throws IOException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); PigServer server = new PigServer(ExecType.LOCAL); @@ -241,23 +272,28 @@ public void testSchemaLoadBasic() throws IOException { assertTrue(Xfields.get(1).type == DataType.CHARARRAY); } + /** * Test that we properly translate data types in Hive/HCat table schema into Pig schema */ @Test public void testSchemaLoadPrimitiveTypes() throws IOException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); AllTypesTable.testSchemaLoadPrimitiveTypes(); } + /** * Test that value from Hive table are read properly in Pig */ @Test public void testReadDataPrimitiveTypes() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); AllTypesTable.testReadDataPrimitiveTypes(); } @Test public void testReadDataBasic() throws IOException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); PigServer server = new PigServer(ExecType.LOCAL); server.registerQuery("X = load '" + BASIC_TABLE 
+ "' using org.apache.hive.hcatalog.pig.HCatLoader();"); @@ -279,6 +315,7 @@ public void testReadDataBasic() throws IOException { @Test public void testSchemaLoadComplex() throws IOException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); PigServer server = new PigServer(ExecType.LOCAL); @@ -337,6 +374,7 @@ public void testSchemaLoadComplex() throws IOException { @Test public void testReadPartitionedBasic() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); PigServer server = new PigServer(ExecType.LOCAL); driver.run("select * from " + PARTITIONED_TABLE); @@ -404,6 +442,7 @@ public void testReadPartitionedBasic() throws IOException, CommandNeedRetryExcep @Test public void testProjectionsBasic() throws IOException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); PigServer server = new PigServer(ExecType.LOCAL); @@ -453,6 +492,7 @@ public void testProjectionsBasic() throws IOException { @Test public void testColumnarStorePushdown() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); String PIGOUTPUT_DIR = TEST_DATA_DIR+ "/colpushdownop"; String PIG_FILE = "test.pig"; String expectedCols = "0,1"; @@ -486,6 +526,7 @@ public void testColumnarStorePushdown() throws Exception { @Test public void testGetInputBytes() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); File file = new File(TEST_WAREHOUSE_DIR + "/" + SPECIFIC_SIZE_TABLE + "/part-m-00000"); file.deleteOnExit(); RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw"); @@ -501,6 +542,7 @@ public void testGetInputBytes() throws Exception { @Test public void testConvertBooleanToInt() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); String tbl = "test_convert_boolean_to_int"; String inputFileName = TEST_DATA_DIR + "/testConvertBooleanToInt/data.txt"; File inputDataDir = new File(inputFileName).getParentFile(); @@ -600,7 +642,11 @@ private static void testSchemaLoadPrimitiveTypes() throws IOException { * Test that value from Hive table are read properly in Pig */ private static void testReadDataPrimitiveTypes() throws Exception { - PigServer server = new PigServer(ExecType.LOCAL); + // testConvertBooleanToInt() sets HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER=true, and + // might be the last one to call HCatContext.INSTANCE.setConf(). Make sure setting is false. 
+ Properties properties = new Properties(); + properties.setProperty(HCatConstants.HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER, "false"); + PigServer server = new PigServer(ExecType.LOCAL, properties); server.registerQuery("X = load '" + ALL_PRIMITIVE_TYPES_TABLE + "' using " + HCatLoader.class.getName() + "();"); Iterator XIter = server.openIterator("X"); int numTuplesRead = 0; @@ -608,22 +654,26 @@ private static void testReadDataPrimitiveTypes() throws Exception { Tuple t = XIter.next(); assertEquals(HCatFieldSchema.Type.numPrimitiveTypes(), t.size()); int colPos = 0; - for(Object referenceData : primitiveRows[numTuplesRead]) { - if(referenceData == null) { - assertTrue("rowNum=" + numTuplesRead + " colNum=" + colPos + " Reference data is null; actual " + - t.get(colPos), t.get(colPos) == null); - } - else if(referenceData instanceof java.util.Date) { - assertTrue("rowNum=" + numTuplesRead + " colNum=" + colPos + " Reference data=" + ((java.util.Date)referenceData).getTime() + " actual=" + - ((DateTime)t.get(colPos)).getMillis() + "; types=(" + referenceData.getClass() + "," + t.get(colPos).getClass() + ")", + for (Object referenceData : primitiveRows[numTuplesRead]) { + if (referenceData == null) { + assertTrue("rowNum=" + numTuplesRead + " colNum=" + colPos + + " Reference data is null; actual " + + t.get(colPos), t.get(colPos) == null); + } else if (referenceData instanceof java.util.Date) { + // Note that here we ignore nanos part of Hive Timestamp since nanos are dropped when + // reading Hive from Pig by design. + assertTrue("rowNum=" + numTuplesRead + " colNum=" + colPos + + " Reference data=" + ((java.util.Date)referenceData).getTime() + + " actual=" + ((DateTime)t.get(colPos)).getMillis() + + "; types=(" + referenceData.getClass() + "," + t.get(colPos).getClass() + ")", ((java.util.Date)referenceData).getTime()== ((DateTime)t.get(colPos)).getMillis()); - //note that here we ignore nanos part of Hive Timestamp since nanos are dropped when reading Hive from Pig by design - } - else { - assertTrue("rowNum=" + numTuplesRead + " colNum=" + colPos + " Reference data=" + referenceData + " actual=" + - t.get(colPos) + "; types=(" + referenceData.getClass() + "," + t.get(colPos).getClass() + ")", + } else { + // Doing String comps here as value objects in Hive in Pig are different so equals() + // doesn't work. + assertTrue("rowNum=" + numTuplesRead + " colNum=" + colPos + + " Reference data=" + referenceData + " actual=" + t.get(colPos) + + "; types=(" + referenceData.getClass() + "," + t.get(colPos).getClass() + ") ", referenceData.toString().equals(t.get(colPos).toString())); - //doing String comps here as value objects in Hive in Pig are different so equals() doesn't work } colPos++; } @@ -633,10 +683,10 @@ else if(referenceData instanceof java.util.Date) { } private static void setupAllTypesTable(Driver driver) throws Exception { String[] primitiveData = new String[primitiveRows.length]; - for(int i = 0; i < primitiveRows.length; i++) { + for (int i = 0; i < primitiveRows.length; i++) { Object[] rowData = primitiveRows[i]; StringBuilder row = new StringBuilder(); - for(Object cell : rowData) { + for (Object cell : rowData) { row.append(row.length() == 0 ? "" : "\t").append(cell == null ? 
null : cell); } primitiveData[i] = row.toString(); diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java index 40ec597..447f39f 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java @@ -18,19 +18,25 @@ */ package org.apache.hive.hcatalog.pig; +import com.google.common.collect.ImmutableSet; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; - -import junit.framework.Assert; +import java.util.Map; +import java.util.Set; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.CommandNeedRetryException; import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.io.IOConstants; +import org.apache.hadoop.hive.ql.io.StorageFormats; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; @@ -46,12 +52,20 @@ import org.apache.pig.impl.logicalLayer.schema.Schema; import org.apache.pig.impl.logicalLayer.schema.Schema.FieldSchema; +import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assume.assumeTrue; + +@RunWith(Parameterized.class) public class TestHCatLoaderComplexSchema { //private static MiniCluster cluster = MiniCluster.buildCluster(); @@ -59,13 +73,33 @@ //private static Properties props; private static final Logger LOG = LoggerFactory.getLogger(TestHCatLoaderComplexSchema.class); - private void dropTable(String tablename) throws IOException, CommandNeedRetryException { - driver.run("drop table " + tablename); + private static final Map> DISABLED_STORAGE_FORMATS = + new HashMap>() {{ + put(IOConstants.AVRO, new HashSet() {{ + add("testSyntheticComplexSchema"); + add("testTupleInBagInTupleInBag"); + add("testMapWithComplexData"); + }}); + put(IOConstants.PARQUETFILE, new HashSet() {{ + add("testSyntheticComplexSchema"); + add("testTupleInBagInTupleInBag"); + add("testMapWithComplexData"); + }}); + }}; + + private String storageFormat; + + @Parameterized.Parameters + public static Collection generateParameters() { + return StorageFormats.names(); + } + + public TestHCatLoaderComplexSchema(String storageFormat) { + this.storageFormat = storageFormat; } - protected String storageFormat() { - return "RCFILE tblproperties('hcat.isd'='org.apache.hive.hcatalog.rcfile.RCFileInputDriver'," + - "'hcat.osd'='org.apache.hive.hcatalog.rcfile.RCFileOutputDriver')"; + private void dropTable(String tablename) throws IOException, CommandNeedRetryException { + driver.run("drop table " + tablename); } private void createTable(String tablename, String schema, String partitionedBy) throws IOException, CommandNeedRetryException { @@ -74,7 +108,7 @@ private void createTable(String tablename, String schema, String partitionedBy) if ((partitionedBy != null) && 
(!partitionedBy.trim().isEmpty())) { createTable = createTable + "partitioned by (" + partitionedBy + ") "; } - createTable = createTable + "stored as " + storageFormat(); + createTable = createTable + "stored as " + storageFormat; LOG.info("Creating table:\n {}", createTable); CommandProcessorResponse result = driver.run(createTable); int retCode = result.getResponseCode(); @@ -89,7 +123,6 @@ private void createTable(String tablename, String schema) throws IOException, Co @BeforeClass public static void setUpBeforeClass() throws Exception { - HiveConf hiveConf = new HiveConf(TestHCatLoaderComplexSchema.class); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); @@ -98,7 +131,6 @@ public static void setUpBeforeClass() throws Exception { SessionState.start(new CliSessionState(hiveConf)); //props = new Properties(); //props.setProperty("fs.default.name", cluster.getProperties().getProperty("fs.default.name")); - } private static final TupleFactory tf = TupleFactory.getInstance(); @@ -118,6 +150,7 @@ private DataBag b(Tuple... objects) { */ @Test public void testSyntheticComplexSchema() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); String pigSchema = "a: " + "(" + @@ -186,7 +219,6 @@ public void testSyntheticComplexSchema() throws Exception { verifyWriteRead("testSyntheticComplexSchema", pigSchema, tableSchema, data, false); verifyWriteRead("testSyntheticComplexSchema2", pigSchema, tableSchema2, data, true); verifyWriteRead("testSyntheticComplexSchema2", pigSchema, tableSchema2, data, false); - } private void verifyWriteRead(String tablename, String pigSchema, String tableSchema, List data, boolean provideSchemaToStorer) @@ -219,7 +251,7 @@ private void verifyWriteRead(String tablename, String pigSchema, String tableSch } Schema dumpedXSchema = server.dumpSchema("X"); - Assert.assertEquals( + assertEquals( "expected " + dumpedASchema + " but was " + dumpedXSchema + " (ignoring field names)", "", compareIgnoreFiledNames(dumpedASchema, dumpedXSchema)); @@ -230,14 +262,14 @@ private void verifyWriteRead(String tablename, String pigSchema, String tableSch } private void compareTuples(Tuple t1, Tuple t2) throws ExecException { - Assert.assertEquals("Tuple Sizes don't match", t1.size(), t2.size()); + assertEquals("Tuple Sizes don't match", t1.size(), t2.size()); for (int i = 0; i < t1.size(); i++) { Object f1 = t1.get(i); Object f2 = t2.get(i); - Assert.assertNotNull("left", f1); - Assert.assertNotNull("right", f2); + assertNotNull("left", f1); + assertNotNull("right", f2); String msg = "right: " + f1 + ", left: " + f2; - Assert.assertEquals(msg, noOrder(f1.toString()), noOrder(f2.toString())); + assertEquals(msg, noOrder(f1.toString()), noOrder(f2.toString())); } } @@ -278,6 +310,7 @@ private String compareIgnoreFiledNames(Schema expected, Schema got) throws Front */ @Test public void testTupleInBagInTupleInBag() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); String pigSchema = "a: { b : ( c: { d: (i : long) } ) }"; String tableSchema = "a array< array< bigint > >"; @@ -297,11 +330,11 @@ public void testTupleInBagInTupleInBag() throws Exception { verifyWriteRead("TupleInBagInTupleInBag3", pigSchema, tableSchema2, data, true); verifyWriteRead("TupleInBagInTupleInBag4", pigSchema, tableSchema2, data, false); - } @Test public void testMapWithComplexData() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, 
DISABLED_STORAGE_FORMATS)); String pigSchema = "a: long, b: map[]"; String tableSchema = "a bigint, b map>"; @@ -320,6 +353,5 @@ public void testMapWithComplexData() throws Exception { } verifyWriteRead("testMapWithComplexData", pigSchema, tableSchema, data, true); verifyWriteRead("testMapWithComplexData2", pigSchema, tableSchema, data, false); - } } diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorer.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorer.java index 763af9f..a380f61 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorer.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorer.java @@ -18,18 +18,27 @@ */ package org.apache.hive.hcatalog.pig; +import com.google.common.collect.ImmutableSet; + import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.math.BigDecimal; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Properties; +import java.util.Set; import org.apache.hadoop.hive.ql.CommandNeedRetryException; +import org.apache.hadoop.hive.ql.io.IOConstants; +import org.apache.hadoop.hive.ql.io.StorageFormats; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hive.hcatalog.HcatTestUtils; @@ -47,20 +56,92 @@ import org.joda.time.DateTime; import org.joda.time.DateTimeZone; -import org.junit.Assert; +import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.junit.Assert.*; +import static org.junit.Assume.assumeTrue; + +@RunWith(Parameterized.class) public class TestHCatStorer extends HCatBaseTest { private static final Logger LOG = LoggerFactory.getLogger(TestHCatStorer.class); private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data"; + private static final Map> DISABLED_STORAGE_FORMATS = + new HashMap>() {{ + put(IOConstants.AVRO, new HashSet() {{ + add("testBagNStruct"); + add("testDateCharTypes"); + add("testDynamicPartitioningMultiPartColsInDataNoSpec"); + add("testDynamicPartitioningMultiPartColsInDataPartialSpec"); + add("testMultiPartColsInData"); + add("testPartColsInData"); + add("testStoreFuncAllSimpleTypes"); + add("testStoreFuncSimple"); + add("testStoreInPartiitonedTbl"); + add("testStoreMultiTables"); + add("testStoreWithNoCtorArgs"); + add("testStoreWithNoSchema"); + add("testWriteChar"); + add("testWriteDate"); + add("testWriteDate2"); + add("testWriteDate3"); + add("testWriteDecimal"); + add("testWriteDecimalX"); + add("testWriteDecimalXY"); + add("testWriteSmallint"); + add("testWriteTimestamp"); + add("testWriteTinyint"); + add("testWriteVarchar"); + }}); + put(IOConstants.PARQUETFILE, new HashSet() {{ + add("testBagNStruct"); + add("testDateCharTypes"); + add("testDynamicPartitioningMultiPartColsInDataNoSpec"); + add("testDynamicPartitioningMultiPartColsInDataPartialSpec"); + add("testMultiPartColsInData"); + add("testPartColsInData"); + add("testStoreFuncAllSimpleTypes"); + add("testStoreFuncSimple"); + add("testStoreInPartiitonedTbl"); + add("testStoreMultiTables"); + add("testStoreWithNoCtorArgs"); + add("testStoreWithNoSchema"); + 
add("testWriteChar"); + add("testWriteDate"); + add("testWriteDate2"); + add("testWriteDate3"); + add("testWriteDecimal"); + add("testWriteDecimalX"); + add("testWriteDecimalXY"); + add("testWriteSmallint"); + add("testWriteTimestamp"); + add("testWriteTinyint"); + add("testWriteVarchar"); + }}); + }}; + + private String storageFormat; + + @Parameterized.Parameters + public static Collection generateParameters() { + return StorageFormats.names(); + } + + public TestHCatStorer(String storageFormat) { + this.storageFormat = storageFormat; + } + //Start: tests that check values from Pig that are out of range for target column @Test public void testWriteTinyint() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); pigValueRangeTest("junitTypeTest1", "tinyint", "int", null, Integer.toString(1), Integer.toString(1)); pigValueRangeTestOverflow("junitTypeTest1", "tinyint", "int", null, Integer.toString(300)); pigValueRangeTestOverflow("junitTypeTest2", "tinyint", "int", HCatBaseStorer.OOR_VALUE_OPT_VALUES.Null, @@ -71,6 +152,7 @@ public void testWriteTinyint() throws Exception { @Test public void testWriteSmallint() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); pigValueRangeTest("junitTypeTest1", "smallint", "int", null, Integer.toString(Short.MIN_VALUE), Integer.toString(Short.MIN_VALUE)); pigValueRangeTestOverflow("junitTypeTest2", "smallint", "int", HCatBaseStorer.OOR_VALUE_OPT_VALUES.Null, @@ -81,6 +163,7 @@ public void testWriteSmallint() throws Exception { @Test public void testWriteChar() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); pigValueRangeTest("junitTypeTest1", "char(5)", "chararray", null, "xxx", "xxx "); pigValueRangeTestOverflow("junitTypeTest1", "char(5)", "chararray", null, "too_long"); pigValueRangeTestOverflow("junitTypeTest2", "char(5)", "chararray", HCatBaseStorer.OOR_VALUE_OPT_VALUES.Null, @@ -91,6 +174,7 @@ public void testWriteChar() throws Exception { @Test public void testWriteVarchar() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); pigValueRangeTest("junitTypeTest1", "varchar(5)", "chararray", null, "xxx", "xxx"); pigValueRangeTestOverflow("junitTypeTest1", "varchar(5)", "chararray", null, "too_long"); pigValueRangeTestOverflow("junitTypeTest2", "varchar(5)", "chararray", HCatBaseStorer.OOR_VALUE_OPT_VALUES.Null, @@ -101,6 +185,7 @@ public void testWriteVarchar() throws Exception { @Test public void testWriteDecimalXY() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); pigValueRangeTest("junitTypeTest1", "decimal(5,2)", "bigdecimal", null, BigDecimal.valueOf(1.2).toString(), BigDecimal.valueOf(1.2).toString()); pigValueRangeTestOverflow("junitTypeTest1", "decimal(5,2)", "bigdecimal", null, BigDecimal.valueOf(12345.12).toString()); @@ -112,6 +197,7 @@ public void testWriteDecimalXY() throws Exception { @Test public void testWriteDecimalX() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); //interestingly decimal(2) means decimal(2,0) pigValueRangeTest("junitTypeTest1", "decimal(2)", "bigdecimal", null, BigDecimal.valueOf(12).toString(), BigDecimal.valueOf(12).toString()); @@ -123,6 +209,7 @@ public void testWriteDecimalX() throws Exception { @Test public void testWriteDecimal() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); //decimal means 
decimal(10,0) pigValueRangeTest("junitTypeTest1", "decimal", "bigdecimal", null, BigDecimal.valueOf(1234567890).toString(), BigDecimal.valueOf(1234567890).toString()); @@ -137,8 +224,10 @@ public void testWriteDecimal() throws Exception { * include time to make sure it's 0 */ private static final String FORMAT_4_DATE = "yyyy-MM-dd HH:mm:ss"; + @Test public void testWriteDate() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); DateTime d = new DateTime(1991,10,11,0,0); pigValueRangeTest("junitTypeTest1", "date", "datetime", null, d.toString(), d.toString(FORMAT_4_DATE), FORMAT_4_DATE); @@ -157,6 +246,7 @@ public void testWriteDate() throws Exception { @Test public void testWriteDate3() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); DateTime d = new DateTime(1991,10,11,23,10,DateTimeZone.forOffsetHours(-11)); FrontendException fe = null; //expect to fail since the time component is not 0 @@ -170,6 +260,7 @@ public void testWriteDate3() throws Exception { @Test public void testWriteDate2() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); DateTime d = new DateTime(1991,11,12,0,0, DateTimeZone.forID("US/Eastern")); pigValueRangeTest("junitTypeTest1", "date", "datetime", null, d.toString(), d.toString(FORMAT_4_DATE), FORMAT_4_DATE); @@ -193,6 +284,7 @@ public void testWriteDate2() throws Exception { */ @Test public void testWriteTimestamp() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); DateTime d = new DateTime(1991,10,11,14,23,30, 10);//uses default TZ pigValueRangeTest("junitTypeTest1", "timestamp", "datetime", null, d.toString(), d.toDateTime(DateTimeZone.getDefault()).toString()); @@ -229,13 +321,6 @@ private void pigValueRangeTest(String tblName, String hiveType, String pigType, } /** - * this should be overridden in subclass to test with different file formats - */ - String getStorageFormat() { - return "RCFILE"; - } - - /** * This is used to test how Pig values of various data types which are out of range for Hive target * column are handled. Currently the options are to raise an error or write NULL. * 1. 
create a data file with 1 column, 1 row @@ -258,7 +343,7 @@ private void pigValueRangeTest(String tblName, String hiveType, String pigType, throws Exception { TestHCatLoader.dropTable(tblName, driver); final String field = "f1"; - TestHCatLoader.createTable(tblName, field + " " + hiveType, null, driver, getStorageFormat()); + TestHCatLoader.createTable(tblName, field + " " + hiveType, null, driver, storageFormat); HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, new String[] {inputValue}); LOG.debug("File=" + INPUT_FILE_NAME); dumpFile(INPUT_FILE_NAME); @@ -287,11 +372,11 @@ private void pigValueRangeTest(String tblName, String hiveType, String pigType, //do nothing, fall through and verify the data break; case Throw: - Assert.assertTrue("Expected a FrontendException", fe != null); - Assert.assertEquals("Expected a different FrontendException.", fe.getMessage(), "Unable to store alias A"); + assertTrue("Expected a FrontendException", fe != null); + assertEquals("Expected a different FrontendException.", fe.getMessage(), "Unable to store alias A"); return;//this test is done default: - Assert.assertFalse("Unexpected goal: " + goal, 1 == 1); + assertFalse("Unexpected goal: " + goal, 1 == 1); } } logAndRegister(server, "B = load '" + tblName + "' using " + HCatLoader.class.getName() + "();", queryNumber); @@ -310,17 +395,17 @@ private void pigValueRangeTest(String tblName, String hiveType, String pigType, Tuple t = itr.next(); if("date".equals(hiveType)) { DateTime dateTime = (DateTime)t.get(0); - Assert.assertTrue(format != null); - Assert.assertEquals("Comparing Pig to Raw data for table " + tblName, expectedValue, dateTime== null ? null : dateTime.toString(format)); + assertTrue(format != null); + assertEquals("Comparing Pig to Raw data for table " + tblName, expectedValue, dateTime== null ? null : dateTime.toString(format)); } else { - Assert.assertEquals("Comparing Pig to Raw data for table " + tblName, expectedValue, t.isNull(0) ? null : t.get(0).toString()); + assertEquals("Comparing Pig to Raw data for table " + tblName, expectedValue, t.isNull(0) ? null : t.get(0).toString()); } //see comment at "Dumping rows via SQL..." 
for why this doesn't work - //Assert.assertEquals("Comparing Pig to Hive", t.get(0), l.get(0)); + //assertEquals("Comparing Pig to Hive", t.get(0), l.get(0)); numRowsRead++; } - Assert.assertEquals("Expected " + 1 + " rows; got " + numRowsRead + " file=" + INPUT_FILE_NAME + "; table " + + assertEquals("Expected " + 1 + " rows; got " + numRowsRead + " file=" + INPUT_FILE_NAME + "; table " + tblName, 1, numRowsRead); /* Misc notes: Unfortunately Timestamp.toString() adjusts the value for local TZ and 't' is a String @@ -334,10 +419,11 @@ private void pigValueRangeTest(String tblName, String hiveType, String pigType, */ @Test public void testDateCharTypes() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); final String tblName = "junit_date_char"; TestHCatLoader.dropTable(tblName, driver); TestHCatLoader.createTable(tblName, - "id int, char5 char(5), varchar10 varchar(10), dec52 decimal(5,2)", null, driver, getStorageFormat()); + "id int, char5 char(5), varchar10 varchar(10), dec52 decimal(5,2)", null, driver, storageFormat); int NUM_ROWS = 5; String[] rows = new String[NUM_ROWS]; for(int i = 0; i < NUM_ROWS; i++) { @@ -376,12 +462,12 @@ public void testDateCharTypes() throws Exception { rowFromPig.append(t.get(i)).append("\t"); } rowFromPig.setLength(rowFromPig.length() - 1); - Assert.assertEquals("Comparing Pig to Raw data", rows[numRowsRead], rowFromPig.toString()); + assertEquals("Comparing Pig to Raw data", rows[numRowsRead], rowFromPig.toString()); //see comment at "Dumping rows via SQL..." for why this doesn't work (for all types) - //Assert.assertEquals("Comparing Pig to Hive", rowFromPig.toString(), l.get(numRowsRead)); + //assertEquals("Comparing Pig to Hive", rowFromPig.toString(), l.get(numRowsRead)); numRowsRead++; } - Assert.assertEquals("Expected " + NUM_ROWS + " rows; got " + numRowsRead + " file=" + INPUT_FILE_NAME, NUM_ROWS, numRowsRead); + assertEquals("Expected " + NUM_ROWS + " rows; got " + numRowsRead + " file=" + INPUT_FILE_NAME, NUM_ROWS, numRowsRead); } static void dumpFile(String fileName) throws Exception { @@ -397,9 +483,10 @@ static void dumpFile(String fileName) throws Exception { @Test public void testPartColsInData() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table junit_unparted"); - String createTable = "create table junit_unparted(a int) partitioned by (b string) stored as " + getStorageFormat(); + String createTable = "create table junit_unparted(a int) partitioned by (b string) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -420,22 +507,23 @@ public void testPartColsInData() throws IOException, CommandNeedRetryException { while (itr.hasNext()) { Tuple t = itr.next(); - Assert.assertEquals(2, t.size()); - Assert.assertEquals(t.get(0), i); - Assert.assertEquals(t.get(1), "1"); + assertEquals(2, t.size()); + assertEquals(t.get(0), i); + assertEquals(t.get(1), "1"); i++; } - Assert.assertFalse(itr.hasNext()); - Assert.assertEquals(LOOP_SIZE, i); + assertFalse(itr.hasNext()); + assertEquals(LOOP_SIZE, i); } @Test public void testMultiPartColsInData() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table employee"); String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , 
emp_gender STRING ) " + - " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS " + getStorageFormat(); + " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { @@ -464,20 +552,21 @@ public void testMultiPartColsInData() throws IOException, CommandNeedRetryExcept driver.run("select * from employee"); ArrayList results = new ArrayList(); driver.getResults(results); - Assert.assertEquals(4, results.size()); + assertEquals(4, results.size()); Collections.sort(results); - Assert.assertEquals(inputData[0], results.get(0)); - Assert.assertEquals(inputData[1], results.get(1)); - Assert.assertEquals(inputData[2], results.get(2)); - Assert.assertEquals(inputData[3], results.get(3)); + assertEquals(inputData[0], results.get(0)); + assertEquals(inputData[1], results.get(1)); + assertEquals(inputData[2], results.get(2)); + assertEquals(inputData[3], results.get(3)); driver.run("drop table employee"); } @Test public void testStoreInPartiitonedTbl() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table junit_unparted"); - String createTable = "create table junit_unparted(a int) partitioned by (b string) stored as " + getStorageFormat(); + String createTable = "create table junit_unparted(a int) partitioned by (b string) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -498,20 +587,21 @@ public void testStoreInPartiitonedTbl() throws IOException, CommandNeedRetryExce while (itr.hasNext()) { Tuple t = itr.next(); - Assert.assertEquals(2, t.size()); - Assert.assertEquals(t.get(0), i); - Assert.assertEquals(t.get(1), "1"); + assertEquals(2, t.size()); + assertEquals(t.get(0), i); + assertEquals(t.get(1), "1"); i++; } - Assert.assertFalse(itr.hasNext()); - Assert.assertEquals(11, i); + assertFalse(itr.hasNext()); + assertEquals(11, i); } @Test public void testNoAlias() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table junit_parted"); - String createTable = "create table junit_parted(a int, b string) partitioned by (ds string) stored as " + getStorageFormat(); + String createTable = "create table junit_parted(a int, b string) partitioned by (ds string) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -526,12 +616,12 @@ public void testNoAlias() throws IOException, CommandNeedRetryException { server.executeBatch(); } catch (PigException fe) { PigException pe = LogUtils.getPigException(fe); - Assert.assertTrue(pe instanceof FrontendException); - Assert.assertEquals(PigHCatUtil.PIG_EXCEPTION_CODE, pe.getErrorCode()); - Assert.assertTrue(pe.getMessage().contains("Column name for a field is not specified. Please provide the full schema as an argument to HCatStorer.")); + assertTrue(pe instanceof FrontendException); + assertEquals(PigHCatUtil.PIG_EXCEPTION_CODE, pe.getErrorCode()); + assertTrue(pe.getMessage().contains("Column name for a field is not specified. 
Please provide the full schema as an argument to HCatStorer.")); errCaught = true; } - Assert.assertTrue(errCaught); + assertTrue(errCaught); errCaught = false; try { server.setBatchOn(); @@ -541,20 +631,21 @@ public void testNoAlias() throws IOException, CommandNeedRetryException { server.executeBatch(); } catch (PigException fe) { PigException pe = LogUtils.getPigException(fe); - Assert.assertTrue(pe instanceof FrontendException); - Assert.assertEquals(PigHCatUtil.PIG_EXCEPTION_CODE, pe.getErrorCode()); - Assert.assertTrue(pe.getMessage().contains("Column names should all be in lowercase. Invalid name found: B")); + assertTrue(pe instanceof FrontendException); + assertEquals(PigHCatUtil.PIG_EXCEPTION_CODE, pe.getErrorCode()); + assertTrue(pe.getMessage().contains("Column names should all be in lowercase. Invalid name found: B")); errCaught = true; } driver.run("drop table junit_parted"); - Assert.assertTrue(errCaught); + assertTrue(errCaught); } @Test public void testStoreMultiTables() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table junit_unparted"); - String createTable = "create table junit_unparted(a int, b string) stored as " + getStorageFormat(); + String createTable = "create table junit_unparted(a int, b string) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -598,18 +689,19 @@ public void testStoreMultiTables() throws IOException, CommandNeedRetryException Iterator itr = res.iterator(); for (int i = 0; i < LOOP_SIZE * LOOP_SIZE; i++) { - Assert.assertEquals(input[i], itr.next()); + assertEquals(input[i], itr.next()); } - Assert.assertFalse(itr.hasNext()); + assertFalse(itr.hasNext()); } @Test public void testStoreWithNoSchema() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table junit_unparted"); - String createTable = "create table junit_unparted(a int, b string) stored as " + getStorageFormat(); + String createTable = "create table junit_unparted(a int, b string) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -637,18 +729,19 @@ public void testStoreWithNoSchema() throws IOException, CommandNeedRetryExceptio driver.run("drop table junit_unparted"); Iterator itr = res.iterator(); for (int i = 0; i < LOOP_SIZE * LOOP_SIZE; i++) { - Assert.assertEquals(input[i], itr.next()); + assertEquals(input[i], itr.next()); } - Assert.assertFalse(itr.hasNext()); + assertFalse(itr.hasNext()); } @Test public void testStoreWithNoCtorArgs() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table junit_unparted"); - String createTable = "create table junit_unparted(a int, b string) stored as " + getStorageFormat(); + String createTable = "create table junit_unparted(a int, b string) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -676,18 +769,19 @@ public void testStoreWithNoCtorArgs() throws IOException, CommandNeedRetryExcept driver.run("drop table junit_unparted"); Iterator itr = res.iterator(); for (int i = 0; i < LOOP_SIZE * LOOP_SIZE; i++) { - Assert.assertEquals(input[i], 
itr.next()); + assertEquals(input[i], itr.next()); } - Assert.assertFalse(itr.hasNext()); + assertFalse(itr.hasNext()); } @Test public void testEmptyStore() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table junit_unparted"); - String createTable = "create table junit_unparted(a int, b string) stored as " + getStorageFormat(); + String createTable = "create table junit_unparted(a int, b string) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -715,15 +809,16 @@ public void testEmptyStore() throws IOException, CommandNeedRetryException { driver.getResults(res); driver.run("drop table junit_unparted"); Iterator itr = res.iterator(); - Assert.assertFalse(itr.hasNext()); + assertFalse(itr.hasNext()); } @Test public void testBagNStruct() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table junit_unparted"); String createTable = "create table junit_unparted(b string,a struct, arr_of_struct array, " + - "arr_of_struct2 array>, arr_of_struct3 array>) stored as " + getStorageFormat(); + "arr_of_struct2 array>, arr_of_struct3 array>) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -746,17 +841,18 @@ public void testBagNStruct() throws IOException, CommandNeedRetryException { driver.getResults(res); driver.run("drop table junit_unparted"); Iterator itr = res.iterator(); - Assert.assertEquals("zookeeper\t{\"a1\":2}\t[\"pig\"]\t[{\"s1\":\"pnuts\",\"s2\":\"hdfs\"}]\t[{\"s3\":\"hadoop\"},{\"s3\":\"hcat\"}]", itr.next()); - Assert.assertEquals("chubby\t{\"a1\":2}\t[\"sawzall\"]\t[{\"s1\":\"bigtable\",\"s2\":\"gfs\"}]\t[{\"s3\":\"mapreduce\"},{\"s3\":\"hcat\"}]", itr.next()); - Assert.assertFalse(itr.hasNext()); + assertEquals("zookeeper\t{\"a1\":2}\t[\"pig\"]\t[{\"s1\":\"pnuts\",\"s2\":\"hdfs\"}]\t[{\"s3\":\"hadoop\"},{\"s3\":\"hcat\"}]", itr.next()); + assertEquals("chubby\t{\"a1\":2}\t[\"sawzall\"]\t[{\"s1\":\"bigtable\",\"s2\":\"gfs\"}]\t[{\"s3\":\"mapreduce\"},{\"s3\":\"hcat\"}]", itr.next()); + assertFalse(itr.hasNext()); } @Test public void testStoreFuncAllSimpleTypes() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table junit_unparted"); - String createTable = "create table junit_unparted(a int, b float, c double, d bigint, e string, h boolean, f binary, g binary) stored as " + getStorageFormat(); + String createTable = "create table junit_unparted(a int, b float, c double, d bigint, e string, h boolean, f binary, g binary) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -783,10 +879,10 @@ public void testStoreFuncAllSimpleTypes() throws IOException, CommandNeedRetryEx Iterator itr = res.iterator(); String next = itr.next(); - Assert.assertEquals("0\tNULL\tNULL\tNULL\tNULL\tNULL\tNULL\tNULL", next ); - Assert.assertEquals("NULL\t4.2\t2.2\t4\tlets hcat\ttrue\tbinary-data\tNULL", itr.next()); - Assert.assertEquals("3\t6.2999997\t3.3000000000000003\t6\tlets hcat\tfalse\tbinary-data\tNULL", itr.next()); - Assert.assertFalse(itr.hasNext()); + 
assertEquals("0\tNULL\tNULL\tNULL\tNULL\tNULL\tNULL\tNULL", next ); + assertEquals("NULL\t4.2\t2.2\t4\tlets hcat\ttrue\tbinary-data\tNULL", itr.next()); + assertEquals("3\t6.2999997\t3.3000000000000003\t6\tlets hcat\tfalse\tbinary-data\tNULL", itr.next()); + assertFalse(itr.hasNext()); server.registerQuery("B = load 'junit_unparted' using " + HCatLoader.class.getName() + ";"); Iterator iter = server.openIterator("B"); @@ -797,21 +893,22 @@ public void testStoreFuncAllSimpleTypes() throws IOException, CommandNeedRetryEx if (t.get(6) == null) { num5nulls++; } else { - Assert.assertTrue(t.get(6) instanceof DataByteArray); + assertTrue(t.get(6) instanceof DataByteArray); } - Assert.assertNull(t.get(7)); + assertNull(t.get(7)); count++; } - Assert.assertEquals(3, count); - Assert.assertEquals(1, num5nulls); + assertEquals(3, count); + assertEquals(1, num5nulls); driver.run("drop table junit_unparted"); } @Test public void testStoreFuncSimple() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table junit_unparted"); - String createTable = "create table junit_unparted(a int, b string) stored as " + getStorageFormat(); + String createTable = "create table junit_unparted(a int, b string) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -841,19 +938,20 @@ public void testStoreFuncSimple() throws IOException, CommandNeedRetryException for (int i = 1; i <= LOOP_SIZE; i++) { String si = i + ""; for (int j = 1; j <= LOOP_SIZE; j++) { - Assert.assertEquals(si + "\t" + j, itr.next()); + assertEquals(si + "\t" + j, itr.next()); } } - Assert.assertFalse(itr.hasNext()); + assertFalse(itr.hasNext()); } @Test public void testDynamicPartitioningMultiPartColsInDataPartialSpec() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table if exists employee"); String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " + - " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS " + getStorageFormat(); + " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { @@ -876,21 +974,22 @@ public void testDynamicPartitioningMultiPartColsInDataPartialSpec() throws IOExc driver.run("select * from employee"); ArrayList results = new ArrayList(); driver.getResults(results); - Assert.assertEquals(4, results.size()); + assertEquals(4, results.size()); Collections.sort(results); - Assert.assertEquals(inputData[0], results.get(0)); - Assert.assertEquals(inputData[1], results.get(1)); - Assert.assertEquals(inputData[2], results.get(2)); - Assert.assertEquals(inputData[3], results.get(3)); + assertEquals(inputData[0], results.get(0)); + assertEquals(inputData[1], results.get(1)); + assertEquals(inputData[2], results.get(2)); + assertEquals(inputData[3], results.get(3)); driver.run("drop table employee"); } @Test public void testDynamicPartitioningMultiPartColsInDataNoSpec() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table if exists employee"); String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " + - " 
PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS " + getStorageFormat(); + " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { @@ -913,21 +1012,22 @@ public void testDynamicPartitioningMultiPartColsInDataNoSpec() throws IOExceptio driver.run("select * from employee"); ArrayList results = new ArrayList(); driver.getResults(results); - Assert.assertEquals(4, results.size()); + assertEquals(4, results.size()); Collections.sort(results); - Assert.assertEquals(inputData[0], results.get(0)); - Assert.assertEquals(inputData[1], results.get(1)); - Assert.assertEquals(inputData[2], results.get(2)); - Assert.assertEquals(inputData[3], results.get(3)); + assertEquals(inputData[0], results.get(0)); + assertEquals(inputData[1], results.get(1)); + assertEquals(inputData[2], results.get(2)); + assertEquals(inputData[3], results.get(3)); driver.run("drop table employee"); } @Test public void testDynamicPartitioningMultiPartColsNoDataInDataNoSpec() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table if exists employee"); String createTable = "CREATE TABLE employee (emp_id INT, emp_name STRING, emp_start_date STRING , emp_gender STRING ) " + - " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS " + getStorageFormat(); + " PARTITIONED BY (emp_country STRING , emp_state STRING ) STORED AS " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { @@ -947,15 +1047,16 @@ public void testDynamicPartitioningMultiPartColsNoDataInDataNoSpec() throws IOEx driver.run("select * from employee"); ArrayList results = new ArrayList(); driver.getResults(results); - Assert.assertEquals(0, results.size()); + assertEquals(0, results.size()); driver.run("drop table employee"); } + @Test - public void testPartitionPublish() - throws IOException, CommandNeedRetryException { + public void testPartitionPublish() throws IOException, CommandNeedRetryException { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); driver.run("drop table ptn_fail"); - String createTable = "create table ptn_fail(a int, c string) partitioned by (b string) stored as " + getStorageFormat(); + String createTable = "create table ptn_fail(a int, c string) partitioned by (b string) stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table."); @@ -987,11 +1088,11 @@ public void testPartitionPublish() ArrayList res = new ArrayList(); driver.getResults(res); - Assert.assertEquals(0, res.size()); + assertEquals(0, res.size()); // Make sure the partitions directory is not in hdfs. 
- Assert.assertTrue((new File(TEST_WAREHOUSE_DIR + "/ptn_fail")).exists()); - Assert.assertFalse((new File(TEST_WAREHOUSE_DIR + "/ptn_fail/b=math")) + assertTrue((new File(TEST_WAREHOUSE_DIR + "/ptn_fail")).exists()); + assertFalse((new File(TEST_WAREHOUSE_DIR + "/ptn_fail/b=math")) .exists()); } diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java index 9679d3c..0c3ec8b 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java @@ -18,18 +18,25 @@ */ package org.apache.hive.hcatalog.pig; +import com.google.common.collect.ImmutableSet; + import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.CommandNeedRetryException; import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.io.IOConstants; +import org.apache.hadoop.hive.ql.io.StorageFormats; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hive.hcatalog.common.HCatUtil; @@ -41,13 +48,17 @@ import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; import static org.junit.Assert.assertEquals; +import static org.junit.Assume.assumeTrue; +@RunWith(Parameterized.class) public class TestHCatStorerMulti { public static final String TEST_DATA_DIR = HCatUtil.makePathASafeFileName( - System.getProperty("user.dir") + "/build/test/data/" + - TestHCatStorerMulti.class.getCanonicalName() + "-" + System.currentTimeMillis()); + System.getProperty("user.dir") + "/build/test/data/" + + TestHCatStorerMulti.class.getCanonicalName() + "-" + System.currentTimeMillis()); private static final String TEST_WAREHOUSE_DIR = TEST_DATA_DIR + "/warehouse"; private static final String INPUT_FILE_NAME = TEST_DATA_DIR + "/input.data"; @@ -57,9 +68,29 @@ private static Map> basicInputData; - protected String storageFormat() { - return "RCFILE tblproperties('hcat.isd'='org.apache.hive.hcatalog.rcfile.RCFileInputDriver'," + - "'hcat.osd'='org.apache.hive.hcatalog.rcfile.RCFileOutputDriver')"; + private static final Map> DISABLED_STORAGE_FORMATS = + new HashMap>() {{ + put(IOConstants.AVRO, new HashSet() {{ + add("testStoreBasicTable"); + add("testStorePartitionedTable"); + add("testStoreTableMulti"); + }}); + put(IOConstants.PARQUETFILE, new HashSet() {{ + add("testStoreBasicTable"); + add("testStorePartitionedTable"); + add("testStoreTableMulti"); + }}); + }}; + + private String storageFormat; + + @Parameterized.Parameters + public static Collection generateParameters() { + return StorageFormats.names(); + } + + public TestHCatStorerMulti(String storageFormat) { + this.storageFormat = storageFormat; } private void dropTable(String tablename) throws IOException, CommandNeedRetryException { @@ -72,7 +103,7 @@ private void createTable(String tablename, String schema, String partitionedBy) if ((partitionedBy != null) && (!partitionedBy.trim().isEmpty())) { createTable = createTable + "partitioned by (" + 
partitionedBy + ") "; } - createTable = createTable + "stored as " + storageFormat(); + createTable = createTable + "stored as " + storageFormat; int retCode = driver.run(createTable).getResponseCode(); if (retCode != 0) { throw new IOException("Failed to create table. [" + createTable + "], return code from hive driver : [" + retCode + "]"); @@ -85,6 +116,8 @@ private void createTable(String tablename, String schema) throws IOException, Co @Before public void setUp() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); + if (driver == null) { HiveConf hiveConf = new HiveConf(this.getClass()); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); @@ -105,6 +138,7 @@ public void tearDown() throws Exception { @Test public void testStoreBasicTable() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); createTable(BASIC_TABLE, "a int, b string"); populateBasicFile(); @@ -124,6 +158,7 @@ public void testStoreBasicTable() throws Exception { @Test public void testStorePartitionedTable() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); createTable(PARTITIONED_TABLE, "a int, b string", "bkt string"); populateBasicFile(); @@ -147,6 +182,7 @@ public void testStorePartitionedTable() throws Exception { @Test public void testStoreTableMulti() throws Exception { + assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS)); createTable(BASIC_TABLE, "a int, b string"); createTable(PARTITIONED_TABLE, "a int, b string", "bkt string"); diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatLoader.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatLoader.java index 82eb0d7..e69de29 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatLoader.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatLoader.java @@ -1,29 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hive.hcatalog.pig; - -public class TestOrcHCatLoader extends TestHCatLoader { - - @Override - protected String storageFormat() { - return "orc"; - } - -} - diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatLoaderComplexSchema.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatLoaderComplexSchema.java index 0538771..e69de29 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatLoaderComplexSchema.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatLoaderComplexSchema.java @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hive.hcatalog.pig; - -public class TestOrcHCatLoaderComplexSchema extends TestHCatLoaderComplexSchema { - - @Override - protected String storageFormat() { - return "orc"; - } - -} diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatStorer.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatStorer.java index 65769b4..e69de29 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatStorer.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatStorer.java @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.apache.hive.hcatalog.pig; - -import java.io.IOException; - -import org.apache.hadoop.hive.ql.CommandNeedRetryException; - -import org.junit.Ignore; -import org.junit.Test; - -public class TestOrcHCatStorer extends TestHCatStorer { - @Override String getStorageFormat() { - return "ORC"; - } -} diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatStorerMulti.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatStorerMulti.java index 77c7979..e69de29 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatStorerMulti.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestOrcHCatStorerMulti.java @@ -1,28 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.apache.hive.hcatalog.pig; - -public class TestOrcHCatStorerMulti extends TestHCatStorerMulti { - - @Override - protected String storageFormat() { - return "orc"; - } -} - diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestUtil.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestUtil.java new file mode 100644 index 0000000..8a652f0 --- /dev/null +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestUtil.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.hive.hcatalog.pig; + +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import org.junit.Test; + +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; + +/** + * Test utilities for selectively disabling specific test methods for given storage formats. 
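Editor's note: a minimal usage sketch of the guard this utility enables (an illustration, not part of the patch; the class name, test method, and disabled-format entry below are hypothetical, the class is assumed to sit in the same package as TestUtil, and StorageFormats.names() is assumed to yield one parameter row per registered format name, as the parameterized test classes in this patch use it):

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    import org.apache.hadoop.hive.ql.io.IOConstants;
    import org.apache.hadoop.hive.ql.io.StorageFormats;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    import static org.junit.Assume.assumeTrue;

    @RunWith(Parameterized.class)
    public class ExampleStorageFormatTest {   // hypothetical test class
      // Methods listed here are skipped for the named format instead of failing.
      private static final Map<String, Set<String>> DISABLED_STORAGE_FORMATS =
          new HashMap<String, Set<String>>() {{
            put(IOConstants.AVRO, new HashSet<String>() {{
              add("testSomething");
            }});
          }};

      private final String storageFormat;

      // One instance of the test class is constructed per storage format name.
      @Parameterized.Parameters
      public static Collection<Object[]> generateParameters() {
        return StorageFormats.names();
      }

      public ExampleStorageFormatTest(String storageFormat) {
        this.storageFormat = storageFormat;
      }

      @Test
      public void testSomething() throws Exception {
        // Skip (rather than fail) format/method combinations that cannot run yet.
        assumeTrue(!TestUtil.shouldSkip(storageFormat, DISABLED_STORAGE_FORMATS));
        // ... create a table "stored as " + storageFormat and exercise it ...
      }
    }

assumeTrue() turns a disabled combination into a skipped test rather than a failure, which is why shouldSkip() below identifies the calling test method from the stack trace instead of taking a method name argument.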
+ */ +public class TestUtil { + private static final Map<String, Set<String>> SAMPLE_DISABLED_TESTS_MAP = + new HashMap<String, Set<String>>() {{ + put("test", new HashSet<String>() {{ + add("testShouldSkip"); + }}); + }}; + + /** + * Determine whether the caller test method is in a set of disabled test methods for a given + * storage format. + * + * @param storageFormat The name of the storage format used in a STORED AS clause. + * @param disabledTestsMap Map of storage format name to set of test method names that indicate + * which test methods should not run against the given storage format. + * @return True if the caller test method should be skipped for the given storage format. + */ + public static boolean shouldSkip(String storageFormat, Map<String, Set<String>> disabledTestsMap) { + final StackTraceElement[] elements = Thread.currentThread().getStackTrace(); + // The most recently invoked method is at the front of the array. The elements are as follows: + // [0] getStackTrace() + // [1] shouldSkip() + // [2] caller test method + String methodName = elements[2].getMethodName(); + if (!disabledTestsMap.containsKey(storageFormat)) { + return false; + } + + Set<String> disabledMethods = disabledTestsMap.get(storageFormat); + return disabledMethods.contains(methodName); + } + + @Test + public void testShouldSkip() { + assertTrue(TestUtil.shouldSkip("test", SAMPLE_DISABLED_TESTS_MAP)); + } + + @Test + public void testShouldNotSkip() { + assertFalse(TestUtil.shouldSkip("test", SAMPLE_DISABLED_TESTS_MAP)); + assertFalse(TestUtil.shouldSkip("foo", SAMPLE_DISABLED_TESTS_MAP)); + } +} diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java index 664248d..a72c6b5 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java @@ -214,7 +214,7 @@ public void onCreateTable(CreateTableEvent tableEvent) throws MetaException { HiveConf conf = handler.getHiveConf(); Table newTbl; try { - newTbl = handler.get_table(tbl.getDbName(), tbl.getTableName()) + newTbl = handler.get_table_core(tbl.getDbName(), tbl.getTableName()) .deepCopy(); newTbl.getParameters().put( HCatConstants.HCAT_MSGBUS_TOPIC_NAME, diff --git a/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml b/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml index 00c660c..d8a5df1 100644 --- a/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml +++ b/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml @@ -77,6 +77,11 @@ shipped to the target node in the cluster to execute Pig job which uses HCat, Hive query, etc. + + templeton.sqoop.path + ${env.SQOOP_HOME}/bin/sqoop + The path to the Sqoop executable. 
+ templeton.controller.mr.child.opts diff --git a/hcatalog/src/test/e2e/templeton/deployers/env.sh b/hcatalog/src/test/e2e/templeton/deployers/env.sh index 804f2fd..d66dd94 100755 --- a/hcatalog/src/test/e2e/templeton/deployers/env.sh +++ b/hcatalog/src/test/e2e/templeton/deployers/env.sh @@ -30,7 +30,7 @@ export PIG_VERSION=0.12.2-SNAPSHOT export PROJ_HOME=/Users/${USER}/dev/hive export HIVE_HOME=${PROJ_HOME}/packaging/target/apache-hive-${HIVE_VERSION}-bin/apache-hive-${HIVE_VERSION}-bin export HADOOP_HOME=/Users/${USER}/dev/hwxhadoop/hadoop-dist/target/hadoop-${HADOOP_VERSION} -#export SQOOP_HOME=/ +export SQOOP_HOME=/Users/${USER}/dev/sqoop-1.4.4.bin__hadoop-2.0.4-alpha #Make sure Pig is built for the Hadoop version you are running export PIG_TAR_PATH=/Users/${USER}/dev/pig-${PIG_VERSION}-src/build diff --git a/hcatalog/src/test/e2e/templeton/tests/doas.conf b/hcatalog/src/test/e2e/templeton/tests/doas.conf index 284168b..5d67968 100644 --- a/hcatalog/src/test/e2e/templeton/tests/doas.conf +++ b/hcatalog/src/test/e2e/templeton/tests/doas.conf @@ -109,7 +109,7 @@ $cfg = 'method' => 'GET', 'url' => ':TEMPLETON_URL:/templeton/v1/ddl/database/default/table/:UNAME:_doastab2/partition?user.name=:UNAME:&doAs=:DOAS:', 'status_code' => 500, - 'json_field_substr_match' => {'error' => 'FAILED: AuthorizationException java\.security\.AccessControlException: action READ not permitted on path .* for user :DOAS:'}, + 'json_field_substr_match' => {'error' => 'java\.security\.AccessControlException: Permission denied: user=:DOAS:, access=READ'}, }, { @@ -118,7 +118,7 @@ $cfg = 'method' => 'DELETE', 'url' => ':TEMPLETON_URL:/templeton/v1/ddl/database/default/table/:UNAME:_doastab2?user.name=:UNAME:&doAs=:DOAS:', 'status_code' => 500, - 'json_field_substr_match' => {'error' => 'java\.security\.AccessControlException: action WRITE not permitted on path .* for user :DOAS:'}, + 'json_field_substr_match' => {'error' => 'java\.security\.AccessControlException: Permission denied: user=:DOAS:, access=READ'}, }, { #descbe the table.... 
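Editor's note: the doas.conf changes above swap the expected Hive-level AuthorizationException text for the raw HDFS AccessControlException that now surfaces. A sketch of the equivalent check the Perl e2e harness performs (the URL shape is taken from the test entry above; the host, user names, and table name are placeholders):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class DoAsPermissionCheck {
      public static void main(String[] args) throws Exception {
        String templetonUrl = "http://localhost:50111";   // placeholder for :TEMPLETON_URL:
        String uname = "hcatuser";                        // placeholder for :UNAME:
        String doas = "otheruser";                        // placeholder for :DOAS:
        URL url = new URL(templetonUrl + "/templeton/v1/ddl/database/default/table/"
            + uname + "_doastab2/partition?user.name=" + uname + "&doAs=" + doas);
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        // The test now expects HTTP 500 with the HDFS-level error in the JSON body.
        System.out.println("status: " + conn.getResponseCode());
        BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getErrorStream()));
        String line;
        while ((line = in.readLine()) != null) {
          // Expected substring per doas.conf:
          // java.security.AccessControlException: Permission denied: user=<doas>, access=READ
          System.out.println(line);
        }
        in.close();
      }
    }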
diff --git a/hcatalog/src/test/e2e/templeton/tests/hcatperms.conf b/hcatalog/src/test/e2e/templeton/tests/hcatperms.conf index 9b6d71f..0c0913e 100644 --- a/hcatalog/src/test/e2e/templeton/tests/hcatperms.conf +++ b/hcatalog/src/test/e2e/templeton/tests/hcatperms.conf @@ -375,6 +375,13 @@ $cfg = { 'method' => 'DELETE', 'format_header' => 'Content-Type: application/json', + 'url' => ':TEMPLETON_URL:/templeton/v1/ddl/database/hcatperms_:TNUM:/table/permstable_:TNUM:', + 'user_name' => ':UNAME_GROUP:', + 'status_code' => 200, + }, + { + 'method' => 'DELETE', + 'format_header' => 'Content-Type: application/json', 'url' => ':TEMPLETON_URL:/templeton/v1/ddl/database/hcatperms_:TNUM:?ifExists=true&option=cascade', 'user_name' => ':UNAME:', 'status_code' => 200, @@ -677,9 +684,7 @@ $cfg = 'format_header' => 'Content-Type: application/json', 'user_name' => ':UNAME_OTHER:', 'status_code' => 500, - 'json_field_substr_match' => {'error' => 'FAILED: AuthorizationException .*\.security\.AccessControlException: action READ not permitted on path .* for user :UNAME_OTHER:'}, - - + 'json_field_substr_match' => {'error' => 'AccessControlException: Permission denied: user=:UNAME_OTHER:, access=READ'}, }, diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java index 2e2987d..c08942c 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java @@ -168,7 +168,7 @@ Table toHiveTable() throws HCatException { newTable.setTableType(TableType.MANAGED_TABLE.toString()); } - if (this.comment != null) { + if (StringUtils.isNotBlank(this.comment)) { newTable.putToParameters("comment", comment); } diff --git a/hcatalog/webhcat/svr/pom.xml b/hcatalog/webhcat/svr/pom.xml index 17499fa..8c94717 100644 --- a/hcatalog/webhcat/svr/pom.xml +++ b/hcatalog/webhcat/svr/pom.xml @@ -49,6 +49,11 @@ com.sun.jersey + jersey-core + ${jersey.version} + + + com.sun.jersey jersey-json ${jersey.version} diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/QueueStatusBean.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/QueueStatusBean.java index 2af17b3..e97b063 100644 --- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/QueueStatusBean.java +++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/QueueStatusBean.java @@ -32,7 +32,7 @@ public JobStatus status; public JobProfile profile; - public String id; + public final String id; public String parentId; public String percentComplete; public Long exitValue; @@ -40,8 +40,11 @@ public String callback; public String completed; public Map userargs; + public String msg; - public QueueStatusBean() { + public QueueStatusBean(String jobId, String errMsg) { + this.id = jobId; + this.msg = errMsg; } /** diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java index 0ca8ccc..9b18a4c 100644 --- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java +++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Server.java @@ -1008,8 +1008,15 @@ else if ((jobid != null && job.compareTo(jobid) > 0) || jobid == null) { jobItem.id = job; if (showDetails) { StatusDelegator sd = new 
StatusDelegator(appConf); - QueueStatusBean statusBean = sd.run(getDoAsUser(), job); - jobItem.detail = statusBean; + try { + jobItem.detail = sd.run(getDoAsUser(), job); + } + catch(Exception ex) { + /*if we could not get status for some reason, log it, and send empty status back with + * just the ID so that caller knows to even look in the log file*/ + LOG.info("Failed to get status detail for jobId='" + job + "'", ex); + jobItem.detail = new QueueStatusBean(job, "Failed to retrieve status; see WebHCat logs"); + } } detailList.add(jobItem); } diff --git a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/tool/TestTempletonUtils.java b/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/tool/TestTempletonUtils.java index b98e8c5..3dcdd64 100644 --- a/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/tool/TestTempletonUtils.java +++ b/hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/tool/TestTempletonUtils.java @@ -320,6 +320,6 @@ public void testFindContainingJar() throws Exception { result = TempletonUtils.findContainingJar(FileSystem.class, ".*hadoop.*\\.jar.*"); Assert.assertNotNull(result); result = TempletonUtils.findContainingJar(HadoopShimsSecure.class, ".*unknownjar.*"); - Assert.assertNull(result); + Assert.assertNull("unexpectedly found jar for HadoopShimsSecure class: " + result, result); } } diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java index 99026b0..e11466d 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java @@ -37,35 +37,51 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Tests information retrieved from hooks, in Kerberos mode. 
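Editor's note: the rewritten hooks below no longer assert inside run(); they record what they observed and latch any Throwable so the JDBC test thread can rethrow it, since a JUnit assertion raised on the HiveServer2 execution thread would otherwise be swallowed. A condensed sketch of that pattern (names shortened for illustration):

    import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
    import org.apache.hadoop.hive.ql.hooks.HookContext;

    public class CapturingHook implements ExecuteWithHookContext {
      // Written on the HiveServer2 execution thread, read on the test thread.
      static volatile String userName;
      static volatile Throwable error;

      public void run(HookContext hookContext) {
        try {
          userName = hookContext.getUserName();   // record only; never assert here
        } catch (Throwable t) {
          error = t;                              // surface it to the test thread
        }
      }
    }

    // In the test, after executing a statement:
    //   if (CapturingHook.error != null) { throw CapturingHook.error; }
    //   Assert.assertNotNull(CapturingHook.userName);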
diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java index 99026b0..e11466d 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java @@ -37,35 +37,51 @@ import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Tests information retrieved from hooks, in Kerberos mode. */ public class TestHs2HooksWithMiniKdc { + private static final Logger LOG = LoggerFactory.getLogger(TestHs2HooksWithMiniKdc.class); + public static class PostExecHook implements ExecuteWithHookContext { - public static String userName = null; - public static String ipAddress = null; + private static String userName; + private static String ipAddress; + private static String operation; + private static Throwable error; public void run(HookContext hookContext) { - if (hookContext.getHookType().equals(HookType.POST_EXEC_HOOK)) { - Assert.assertNotNull(hookContext.getIpAddress(), "IP Address is null"); - ipAddress = hookContext.getIpAddress(); - Assert.assertNotNull(hookContext.getUserName(), "Username is null"); - userName = hookContext.getUserName(); + try { + if (hookContext.getHookType().equals(HookType.POST_EXEC_HOOK)) { + ipAddress = hookContext.getIpAddress(); + userName = hookContext.getUserName(); + operation = hookContext.getOperationName(); + } + } catch (Throwable t) { + LOG.error("Error in PostExecHook: " + t, t); + error = t; } } } public static class PreExecHook implements ExecuteWithHookContext { - public static String userName = null; - public static String ipAddress = null; + private static String userName; + private static String ipAddress; + private static String operation; + private static Throwable error; public void run(HookContext hookContext) { - if (hookContext.getHookType().equals(HookType.PRE_EXEC_HOOK)) { - Assert.assertNotNull(hookContext.getIpAddress(), "IP Address is null"); - ipAddress = hookContext.getIpAddress(); - Assert.assertNotNull(hookContext.getUserName(), "Username is null"); - userName = hookContext.getUserName(); + try { + if (hookContext.getHookType().equals(HookType.PRE_EXEC_HOOK)) { + ipAddress = hookContext.getIpAddress(); + userName = hookContext.getUserName(); + operation = hookContext.getOperationName(); + } + } catch (Throwable t) { + LOG.error("Error in PreExecHook: " + t, t); + error = t; } } }
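NOTE: these hooks run inside HiveServer2, not on the JUnit thread, so an assertion failure thrown in run() would be swallowed by the server and the test would pass vacuously. The rewrite therefore only records what the hook saw, plus any Throwable, and lets the test thread rethrow and assert afterwards. A generic restatement of the idea (all types and names here are illustrative, not Hive API; marking the fields volatile is a suggested hardening, since the patch itself uses plain statics written on the server thread and read on the test thread):

    interface Callback { String getUserName(); }

    class CapturingHook {
      static volatile String observedUser;  // written by the worker, read by the test
      static volatile Throwable error;

      void run(Callback ctx) {
        try {
          observedUser = ctx.getUserName(); // may throw inside the server
        } catch (Throwable t) {
          error = t;                        // never let a failure die unseen
        }
      }

      static void rethrowIfFailed() throws Throwable {
        if (error != null) {
          throw error;                      // surfaces on the test thread
        }
      }
    }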
@@ -108,22 +124,36 @@ public static void afterTest() throws Exception { /** * Test get IpAddress and username from hook. - * @throws Exception */ @Test - public void testIpUserName() throws Exception { + public void testIpUserName() throws Throwable { miniHiveKdc.loginUser(MiniHiveKdc.HIVE_TEST_USER_1); hs2Conn = DriverManager.getConnection(miniHS2.getJdbcURL()); Statement stmt = hs2Conn.createStatement(); + stmt.executeQuery("show databases"); stmt.executeQuery("show tables"); + Throwable error = PostExecHook.error; + if (error != null) { + throw error; + } + error = PreExecHook.error; + if (error != null) { + throw error; + } + Assert.assertNotNull("ipaddress is null", PostExecHook.ipAddress); + Assert.assertNotNull("userName is null", PostExecHook.userName); + Assert.assertNotNull("operation is null", PostExecHook.operation); Assert.assertEquals(MiniHiveKdc.HIVE_TEST_USER_1, PostExecHook.userName); - Assert.assertNotNull(PostExecHook.ipAddress); - Assert.assertTrue(PostExecHook.ipAddress.contains("127.0.0.1")); + Assert.assertTrue(PostExecHook.ipAddress, PostExecHook.ipAddress.contains("127.0.0.1")); + Assert.assertEquals("SHOWTABLES", PostExecHook.operation); + Assert.assertNotNull("ipaddress is null", PreExecHook.ipAddress); + Assert.assertNotNull("userName is null", PreExecHook.userName); + Assert.assertNotNull("operation is null", PreExecHook.operation); Assert.assertEquals(MiniHiveKdc.HIVE_TEST_USER_1, PreExecHook.userName); - Assert.assertNotNull(PreExecHook.ipAddress); - Assert.assertTrue(PreExecHook.ipAddress.contains("127.0.0.1")); + Assert.assertTrue(PreExecHook.ipAddress, PreExecHook.ipAddress.contains("127.0.0.1")); + Assert.assertEquals("SHOWTABLES", PreExecHook.operation); } } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java index 49b9994..0676758 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hive.hooks; import java.util.Properties; +import java.sql.Statement; import junit.framework.Assert; @@ -32,38 +33,52 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Tests information retrieved from hooks.
*/ public class TestHs2Hooks { - + private static final Logger LOG = LoggerFactory.getLogger(TestHs2Hooks.class); private static HiveServer2 hiveServer2; - public static class PreExecHook implements ExecuteWithHookContext { - public static String userName = null; - public static String ipAddress = null; + public static class PostExecHook implements ExecuteWithHookContext { + private static String userName; + private static String ipAddress; + private static String operation; + private static Throwable error; public void run(HookContext hookContext) { - if (hookContext.getHookType().equals(HookType.PRE_EXEC_HOOK)) { - Assert.assertNotNull(hookContext.getIpAddress(), "IP Address is null"); - ipAddress = hookContext.getIpAddress(); - Assert.assertNotNull(hookContext.getUserName(), "Username is null"); - userName = hookContext.getUserName(); + try { + if (hookContext.getHookType().equals(HookType.POST_EXEC_HOOK)) { + ipAddress = hookContext.getIpAddress(); + userName = hookContext.getUserName(); + operation = hookContext.getOperationName(); + } + } catch (Throwable t) { + LOG.error("Error in PostExecHook: " + t, t); + error = t; } } } - public static class PostExecHook implements ExecuteWithHookContext { - public static String userName = null; - public static String ipAddress = null; + public static class PreExecHook implements ExecuteWithHookContext { + private static String userName; + private static String ipAddress; + private static String operation; + private static Throwable error; public void run(HookContext hookContext) { - if (hookContext.getHookType().equals(HookType.POST_EXEC_HOOK)) { - Assert.assertNotNull(hookContext.getIpAddress(), "IP Address is null"); - ipAddress = hookContext.getIpAddress(); - Assert.assertNotNull(hookContext.getUserName(), "Username is null"); - userName = hookContext.getUserName(); + try { + if (hookContext.getHookType().equals(HookType.PRE_EXEC_HOOK)) { + ipAddress = hookContext.getIpAddress(); + userName = hookContext.getUserName(); + operation = hookContext.getOperationName(); + } + } catch (Throwable t) { + LOG.error("Error in PreExecHook: " + t, t); + error = t; } } } @@ -94,26 +109,39 @@ public static void tearDownAfterClass() throws Exception { /** * Test get IpAddress and username from hook. 
- * @throws Exception */ @Test - public void testIpUserName() throws Exception { + public void testIpUserName() throws Throwable { Properties connProp = new Properties(); connProp.setProperty("user", System.getProperty("user.name")); connProp.setProperty("password", ""); HiveConnection connection = new HiveConnection("jdbc:hive2://localhost:10000/default", connProp); - connection.createStatement().execute("show tables"); + Statement stmt = connection.createStatement(); + stmt.executeQuery("show databases"); + stmt.executeQuery("show tables"); + Throwable error = PostExecHook.error; + if (error != null) { + throw error; + } + error = PreExecHook.error; + if (error != null) { + throw error; + } Assert.assertEquals(System.getProperty("user.name"), PostExecHook.userName); - Assert.assertNotNull(PostExecHook.ipAddress); - Assert.assertTrue(PostExecHook.ipAddress.contains("127.0.0.1")); + Assert.assertNotNull("ipaddress is null", PostExecHook.ipAddress); + Assert.assertNotNull("userName is null", PostExecHook.userName); + Assert.assertNotNull("operation is null", PostExecHook.operation); + Assert.assertTrue(PostExecHook.ipAddress, PostExecHook.ipAddress.contains("127.0.0.1")); + Assert.assertEquals("SHOWTABLES", PostExecHook.operation); Assert.assertEquals(System.getProperty("user.name"), PreExecHook.userName); - Assert.assertNotNull(PreExecHook.ipAddress); - Assert.assertTrue(PreExecHook.ipAddress.contains("127.0.0.1")); - - connection.close(); + Assert.assertNotNull("ipaddress is null", PreExecHook.ipAddress); + Assert.assertNotNull("userName is null", PreExecHook.userName); + Assert.assertNotNull("operation is null", PreExecHook.operation); + Assert.assertTrue(PreExecHook.ipAddress, PreExecHook.ipAddress.contains("127.0.0.1")); + Assert.assertEquals("SHOWTABLES", PreExecHook.operation); } } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java index fa8c43b..1bd6f2a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java @@ -1115,6 +1115,12 @@ public void testDriverProperties() throws SQLException { } + public void testInvalidUrl() throws SQLException { + HiveDriver driver = new HiveDriver(); + + assertNull(driver.connect("jdbc:hive2://localhost:1000", null)); + } + private static void assertDpi(DriverPropertyInfo dpi, String name, String value) { assertEquals("Invalid DriverPropertyInfo name", name, dpi.name); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java index 7b86b0c..7f0a6b3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java @@ -76,6 +76,21 @@ public void testTxns() throws Exception { } @Test + public void testOpenTxnNotExcluded() throws Exception { + List tids = client.openTxns("me", 3).getTxn_ids(); + Assert.assertEquals(1L, (long) tids.get(0)); + Assert.assertEquals(2L, (long) tids.get(1)); + Assert.assertEquals(3L, (long) tids.get(2)); + client.rollbackTxn(1); + client.commitTxn(2); + ValidTxnList validTxns = client.getValidTxns(3); + Assert.assertFalse(validTxns.isTxnCommitted(1)); + Assert.assertTrue(validTxns.isTxnCommitted(2)); + Assert.assertTrue(validTxns.isTxnCommitted(3)); + Assert.assertFalse(validTxns.isTxnCommitted(4)); + }
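NOTE: testOpenTxnNotExcluded pins down the snapshot semantics of ValidTxnList: an aborted transaction (1) reads as not committed, a committed one (2) as committed, the calling transaction itself (3) is not excluded, so a transaction can see its own writes, and an id that was never allocated (4) is invisible. From a reader's point of view (a sketch reusing only the calls from the hunk above; writerTxn is a hypothetical variable):

    ValidTxnList snapshot = client.getValidTxns(3);   // 3 = the reader's own txn
    boolean visible = snapshot.isTxnCommitted(writerTxn);
    // writerTxn == 1 -> false (rolled back)
    // writerTxn == 2 -> true  (committed)
    // writerTxn == 3 -> true  (own txn, hence the test name)
    // writerTxn == 4 -> false (beyond the snapshot high-water mark)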
+ + @Test public void testTxnRange() throws Exception { ValidTxnList validTxns = client.getValidTxns(); Assert.assertEquals(ValidTxnList.RangeResponse.NONE, diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java index 3e4c34a..3b5f65f 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java @@ -193,43 +193,39 @@ public void testListener() throws Exception { driver.run("create database " + dbName); listSize++; + PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1)); Database db = msc.getDatabase(dbName); assertEquals(listSize, notifyList.size()); - assertEquals(listSize, preNotifyList.size()); + assertEquals(listSize + 1, preNotifyList.size()); + validateCreateDb(db, preDbEvent.getDatabase()); CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1)); assert dbEvent.getStatus(); validateCreateDb(db, dbEvent.getDatabase()); - PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(listSize - 1)); - validateCreateDb(db, preDbEvent.getDatabase()); driver.run("use " + dbName); driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName)); + PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1)); listSize++; Table tbl = msc.getTable(dbName, tblName); + validateCreateTable(tbl, preTblEvent.getTable()); assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1)); assert tblEvent.getStatus(); validateCreateTable(tbl, tblEvent.getTable()); - PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(listSize - 1)); - validateCreateTable(tbl, preTblEvent.getTable()); - driver.run("alter table tmptbl add partition (b='2011')"); listSize++; - Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011"); assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); + PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1)); AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1)); assert partEvent.getStatus(); + Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011"); validateAddPartition(part, partEvent.getPartitions().get(0)); validateTableInAddPartition(tbl, partEvent.getTable()); - - PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(listSize-1)); validateAddPartition(part, prePartEvent.getPartitions().get(0)); // Test adding multiple partitions in a single partition-set, atomically.
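NOTE: the recurring change in this test is that the pre-event list is now read from its tail (preNotifyList.size() - 1) and the exact-size assertions on it are dropped. With metastore-side read authorization in play, reads such as get_database and get_table fire additional pre-events, so preNotifyList no longer advances in lockstep with notifyList. A hypothetical helper that captures the pattern (PreEventContext is the common base class of these pre-events; the helper itself is not part of the patch):

    static <T extends PreEventContext> T lastPreEvent(List<PreEventContext> preNotifyList,
        Class<T> expected) {
      // The newest pre-event is the one our own DDL just fired,
      // whatever read events arrived in between.
      return expected.cast(preNotifyList.get(preNotifyList.size() - 1));
    }

    // usage: PreCreateTableEvent e = lastPreEvent(preNotifyList, PreCreateTableEvent.class);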
@@ -254,7 +250,8 @@ public void testListener() throws Exception { driver.run(String.format("alter table %s touch partition (%s)", tblName, "b='2011'")); listSize++; assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); + PreAlterPartitionEvent preAlterPartEvent = + (PreAlterPartitionEvent)preNotifyList.get(preNotifyList.size() - 1); //the partition did not change, // so the new partition should be similar to the original partition @@ -266,40 +263,39 @@ public void testListener() throws Exception { alterPartEvent.getOldPartition().getTableName(), alterPartEvent.getOldPartition().getValues(), alterPartEvent.getNewPartition()); - PreAlterPartitionEvent preAlterPartEvent = - (PreAlterPartitionEvent)preNotifyList.get(listSize - 1); + validateAlterPartition(origP, origP, preAlterPartEvent.getDbName(), preAlterPartEvent.getTableName(), preAlterPartEvent.getNewPartition().getValues(), preAlterPartEvent.getNewPartition()); List part_vals = new ArrayList(); part_vals.add("c=2012"); + int preEventListSize; + preEventListSize = preNotifyList.size() + 1; Partition newPart = msc.appendPartition(dbName, tblName, part_vals); listSize++; assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); + assertEquals(preNotifyList.size(), preEventListSize); AddPartitionEvent appendPartEvent = (AddPartitionEvent)(notifyList.get(listSize-1)); validateAddPartition(newPart, appendPartEvent.getPartitions().get(0)); PreAddPartitionEvent preAppendPartEvent = - (PreAddPartitionEvent)(preNotifyList.get(listSize-1)); + (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1)); validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0)); driver.run(String.format("alter table %s rename to %s", tblName, renamed)); listSize++; assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); + PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1); Table renamedTable = msc.getTable(dbName, renamed); AlterTableEvent alterTableE = (AlterTableEvent) notifyList.get(listSize-1); assert alterTableE.getStatus(); validateAlterTable(tbl, renamedTable, alterTableE.getOldTable(), alterTableE.getNewTable()); - - PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(listSize-1); validateAlterTable(tbl, renamedTable, preAlterTableE.getOldTable(), preAlterTableE.getNewTable()); @@ -307,20 +303,17 @@ public void testListener() throws Exception { driver.run(String.format("alter table %s rename to %s", renamed, tblName)); listSize++; assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); driver.run(String.format("alter table %s ADD COLUMNS (c int)", tblName)); listSize++; assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); + preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1); Table altTable = msc.getTable(dbName, tblName); alterTableE = (AlterTableEvent) notifyList.get(listSize-1); assert alterTableE.getStatus(); validateAlterTableColumns(tbl, altTable, alterTableE.getOldTable(), alterTableE.getNewTable()); - - preAlterTableE = (PreAlterTableEvent) preNotifyList.get(listSize-1); validateAlterTableColumns(tbl, altTable, preAlterTableE.getOldTable(), preAlterTableE.getNewTable()); @@ -329,7 +322,6 @@ public void testListener() throws Exception { msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE); listSize++; 
assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent)notifyList.get(listSize - 1); assert partMarkEvent.getStatus(); @@ -337,46 +329,42 @@ public void testListener() throws Exception { partMarkEvent.getPartitionName()); PreLoadPartitionDoneEvent prePartMarkEvent = - (PreLoadPartitionDoneEvent)preNotifyList.get(listSize - 1); + (PreLoadPartitionDoneEvent)preNotifyList.get(preNotifyList.size() - 1); validateLoadPartitionDone("tmptbl", kvs, prePartMarkEvent.getTableName(), prePartMarkEvent.getPartitionName()); driver.run(String.format("alter table %s drop partition (b='2011')", tblName)); listSize++; assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); + PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList + .size() - 1); DropPartitionEvent dropPart = (DropPartitionEvent)notifyList.get(listSize - 1); assert dropPart.getStatus(); validateDropPartition(part, dropPart.getPartition()); validateTableInDropPartition(tbl, dropPart.getTable()); - PreDropPartitionEvent preDropPart = (PreDropPartitionEvent)preNotifyList.get(listSize - 1); validateDropPartition(part, preDropPart.getPartition()); validateTableInDropPartition(tbl, preDropPart.getTable()); driver.run("drop table " + tblName); listSize++; assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); + PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(preNotifyList.size() - 1); DropTableEvent dropTbl = (DropTableEvent)notifyList.get(listSize-1); assert dropTbl.getStatus(); validateDropTable(tbl, dropTbl.getTable()); - - PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(listSize-1); validateDropTable(tbl, preDropTbl.getTable()); driver.run("drop database " + dbName); listSize++; assertEquals(notifyList.size(), listSize); - assertEquals(preNotifyList.size(), listSize); + PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(preNotifyList.size() - 1); DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1); assert dropDB.getStatus(); validateDropDb(db, dropDB.getDatabase()); - - PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(listSize-1); validateDropDb(db, preDropDB.getDatabase()); SetProcessor.setVariable("metaconf:hive.metastore.try.direct.sql", "false"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java index 7600e99..63eea27 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java @@ -21,6 +21,7 @@ import junit.framework.TestCase; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.util.StringUtils; @@ -49,6 +50,8 @@ protected void setUp() throws Exception { int port = MetaStoreUtils.findFreePort(); System.out.println("Starting MetaStore Server on port " + port); + System.setProperty(ConfVars.METASTORE_EVENT_LISTENERS.varname, + IpAddressListener.class.getName()); MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); 
isServerStarted = true; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestCreateUdfEntities.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestCreateUdfEntities.java new file mode 100644 index 0000000..c60f856 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestCreateUdfEntities.java @@ -0,0 +1,74 @@ +package org.apache.hadoop.hive.ql; + +import static org.junit.Assert.*; + +import java.net.URI; +import java.util.Set; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.hooks.Entity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +public class TestCreateUdfEntities { + private Driver driver; + private String funcName = "print_test"; + + @Before + public void setUp() throws Exception { + + HiveConf conf = new HiveConf(Driver.class); + SessionState.start(conf); + driver = new Driver(conf); + driver.init(); + } + + @After + public void tearDown() throws Exception { + driver.run("drop function " + funcName); + driver.close(); + SessionState.get().close(); + } + + @Test + public void testUdfWithLocalResource() throws Exception { + int rc = driver.compile("CREATE FUNCTION " + funcName + " AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFPrintf' " + + " using file '" + "file:///tmp/udf1.jar'"); + assertEquals(0, rc); + WriteEntity outputEntities[] = driver.getPlan().getOutputs().toArray(new WriteEntity[] {}); + assertEquals(outputEntities.length, 3); + + assertEquals(Entity.Type.DATABASE, outputEntities[0].getType()); + assertEquals("default", outputEntities[0].getDatabase().getName()); + + assertEquals(Entity.Type.FUNCTION, outputEntities[1].getType()); + assertEquals(funcName, outputEntities[1].getFunctionName()); + + assertEquals(Entity.Type.LOCAL_DIR, outputEntities[2].getType()); + assertEquals("file:/tmp/udf1.jar", outputEntities[2].getLocation().toString()); + } + + @Test + public void testUdfWithDfsResource() throws Exception { + int rc = driver.compile("CREATE FUNCTION default." 
+ funcName + " AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFPrintf' " + + " using file '" + "hdfs:///tmp/udf1.jar'"); + assertEquals(0, rc); + WriteEntity outputEntities[] = driver.getPlan().getOutputs().toArray(new WriteEntity[] {}); + assertEquals(outputEntities.length, 3); + + assertEquals(Entity.Type.DATABASE, outputEntities[0].getType()); + assertEquals("default", outputEntities[0].getDatabase().getName()); + + assertEquals(Entity.Type.FUNCTION, outputEntities[1].getType()); + assertEquals(funcName, outputEntities[1].getFunctionName()); + + assertEquals(Entity.Type.DFS_DIR, outputEntities[2].getType()); + assertEquals("hdfs:/tmp/udf1.jar", outputEntities[2].getLocation().toString()); + } + +} diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java index 7afcd47..76c1636 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java @@ -139,7 +139,7 @@ public void testSimpleQuery() { SessionState.start(ss); - String cmd = "select a.key from src a"; + String cmd = "select a.key+1 from src a"; Driver d = new Driver(conf); int ret = d.run(cmd).getResponseCode(); if (ret != 0) { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java new file mode 100644 index 0000000..b59d2e1 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.security; + +import java.net.URI; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.cli.CliSessionState; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; +import org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.security.UserGroupInformation; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; + +/** + * Base class for some storage based authorization test classes + */ +public class StorageBasedMetastoreTestBase { + protected HiveConf clientHiveConf; + protected HiveMetaStoreClient msc; + protected Driver driver; + protected UserGroupInformation ugi; + private static int objNum = 0; + + protected String getAuthorizationProvider(){ + return StorageBasedAuthorizationProvider.class.getName(); + } + + protected HiveConf createHiveConf() throws Exception { + return new HiveConf(this.getClass()); + } + + @Before + public void setUp() throws Exception { + + int port = MetaStoreUtils.findFreePort(); + + // Turn on metastore-side authorization + System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, + AuthorizationPreEventListener.class.getName()); + System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname, + getAuthorizationProvider()); + System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname, + InjectableDummyAuthenticator.class.getName()); + + MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + + clientHiveConf = createHiveConf(); + + // Turn off client-side authorization + clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false); + + clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + + clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + + ugi = ShimLoader.getHadoopShims().getUGIForConf(clientHiveConf); + + SessionState.start(new CliSessionState(clientHiveConf)); + msc = new HiveMetaStoreClient(clientHiveConf, null); + driver = new Driver(clientHiveConf); + + setupFakeUser(); + InjectableDummyAuthenticator.injectMode(false); + } + + protected void setupFakeUser() { + String fakeUser = "mal"; + List fakeGroupNames = new ArrayList(); + fakeGroupNames.add("groupygroup"); + + InjectableDummyAuthenticator.injectUserName(fakeUser); + InjectableDummyAuthenticator.injectGroupNames(fakeGroupNames); + } + + protected String setupUser() { + return ugi.getUserName(); + } + + protected String getTestTableName() { + return this.getClass().getSimpleName() + "tab" + ++objNum; + } + + protected String getTestDbName() { + return this.getClass().getSimpleName() + "db" + ++objNum; + } + + @After + public void tearDown() throws 
Exception { + InjectableDummyAuthenticator.injectMode(false); + } + + protected void setPermissions(String locn, String permissions) throws Exception { + FileSystem fs = FileSystem.get(new URI(locn), clientHiveConf); + fs.setPermission(new Path(locn), FsPermission.valueOf(permissions)); + } + + protected void validateCreateDb(Database expectedDb, String dbName) { + Assert.assertEquals(expectedDb.getName().toLowerCase(), dbName.toLowerCase()); + } + + +} diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java index fff1ed2..b8f5dd1 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java @@ -171,41 +171,36 @@ public void testListener() throws Exception { driver.run("create database " + dbName); listSize++; - Database db = msc.getDatabase(dbName); - Database dbFromEvent = (Database)assertAndExtractSingleObjectFromEvent(listSize, authCalls, DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.DB); + Database db = msc.getDatabase(dbName); validateCreateDb(db,dbFromEvent); driver.run("use " + dbName); driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName)); - listSize++; - Table tbl = msc.getTable(dbName, tblName); + listSize = authCalls.size(); Table tblFromEvent = ( (org.apache.hadoop.hive.ql.metadata.Table) assertAndExtractSingleObjectFromEvent(listSize, authCalls, DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.TABLE)) .getTTable(); + Table tbl = msc.getTable(dbName, tblName); validateCreateTable(tbl, tblFromEvent); driver.run("alter table tmptbl add partition (b='2011')"); - listSize++; - Partition part = msc.getPartition("hive3705", "tmptbl", "b=2011"); + listSize = authCalls.size(); Partition ptnFromEvent = ( (org.apache.hadoop.hive.ql.metadata.Partition) assertAndExtractSingleObjectFromEvent(listSize, authCalls, DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.PARTITION)) .getTPartition(); + Partition part = msc.getPartition("hive3705", "tmptbl", "b=2011"); validateAddPartition(part,ptnFromEvent); driver.run(String.format("alter table %s touch partition (%s)", tblName, "b='2011'")); - listSize++; - - //the partition did not change, - // so the new partition should be similar to the original partition - Partition modifiedP = msc.getPartition(dbName, tblName, "b=2011"); + listSize = authCalls.size(); Partition ptnFromEventAfterAlter = ( (org.apache.hadoop.hive.ql.metadata.Partition) @@ -213,6 +208,9 @@ public void testListener() throws Exception { DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.PARTITION)) .getTPartition(); + //the partition did not change, + // so the new partition should be similar to the original partition + Partition modifiedP = msc.getPartition(dbName, tblName, "b=2011"); validateAlterPartition(part, modifiedP, ptnFromEventAfterAlter.getDbName(), ptnFromEventAfterAlter.getTableName(), ptnFromEventAfterAlter.getValues(), ptnFromEventAfterAlter); @@ -220,8 +218,9 @@ public void testListener() throws Exception { List part_vals = new ArrayList(); part_vals.add("c=2012"); - Partition newPart = msc.appendPartition(dbName, tblName, part_vals); + listSize = authCalls.size(); + Partition newPart = msc.appendPartition(dbName, tblName, part_vals); listSize++; 
Partition newPtnFromEvent = ( @@ -233,25 +232,23 @@ public void testListener() throws Exception { driver.run(String.format("alter table %s rename to %s", tblName, renamed)); - listSize++; + listSize = authCalls.size(); - Table renamedTable = msc.getTable(dbName, renamed); Table renamedTableFromEvent = ( (org.apache.hadoop.hive.ql.metadata.Table) assertAndExtractSingleObjectFromEvent(listSize, authCalls, DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.TABLE)) .getTTable(); + Table renamedTable = msc.getTable(dbName, renamed); validateAlterTable(tbl, renamedTable, renamedTableFromEvent, renamedTable); assertFalse(tbl.getTableName().equals(renamedTable.getTableName())); //change the table name back driver.run(String.format("alter table %s rename to %s", renamed, tblName)); - listSize++; - driver.run(String.format("alter table %s drop partition (b='2011')", tblName)); - listSize++; + listSize = authCalls.size(); Partition ptnFromDropPartition = ( (org.apache.hadoop.hive.ql.metadata.Partition) @@ -262,7 +259,7 @@ public void testListener() throws Exception { validateDropPartition(modifiedP, ptnFromDropPartition); driver.run("drop table " + tblName); - listSize++; + listSize = authCalls.size(); Table tableFromDropTableEvent = ( (org.apache.hadoop.hive.ql.metadata.Table) assertAndExtractSingleObjectFromEvent(listSize, authCalls, @@ -290,16 +287,16 @@ public void testListener() throws Exception { } tCustom.setTableName(tbl.getTableName() + "_custom"); + listSize = authCalls.size(); msc.createTable(tCustom); listSize++; - Table customCreatedTable = msc.getTable(tCustom.getDbName(), tCustom.getTableName()); Table customCreatedTableFromEvent = ( (org.apache.hadoop.hive.ql.metadata.Table) assertAndExtractSingleObjectFromEvent(listSize, authCalls, DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.TABLE)) .getTTable(); - + Table customCreatedTable = msc.getTable(tCustom.getDbName(), tCustom.getTableName()); validateCreateTable(tCustom,customCreatedTable); validateCreateTable(tCustom,customCreatedTableFromEvent); @@ -316,8 +313,10 @@ public void testListener() throws Exception { assertEquals(tCustom.getSd().getSerdeInfo().getSerializationLib(), customCreatedTableFromEvent.getSd().getSerdeInfo().getSerializationLib()); - msc.dropTable(tCustom.getDbName(),tCustom.getTableName()); - listSize++; + listSize = authCalls.size(); + msc.dropTable(tCustom.getDbName(), tCustom.getTableName()); + listSize += 2; + Table table2FromDropTableEvent = ( (org.apache.hadoop.hive.ql.metadata.Table) assertAndExtractSingleObjectFromEvent(listSize, authCalls, @@ -327,7 +326,7 @@ public void testListener() throws Exception { validateDropTable(tCustom, table2FromDropTableEvent); driver.run("drop database " + dbName); - listSize++; + listSize = authCalls.size(); Database dbFromDropDatabaseEvent = (Database)assertAndExtractSingleObjectFromEvent(listSize, authCalls, DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.DB); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java index c869469..3bde2fc 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java @@ -89,6 +89,7 @@ protected void setUp() throws Exception { AuthorizationPreEventListener.class.getName()); 
System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname, getAuthorizationProvider()); + setupMetaStoreReadAuthorization(); System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname, InjectableDummyAuthenticator.class.getName()); System.setProperty(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS.varname, ""); @@ -115,6 +116,13 @@ protected void setUp() throws Exception { driver = new Driver(clientHiveConf); } + protected void setupMetaStoreReadAuthorization() { + // Read authorization does not work with the default/legacy authorization mode. + // Granting a select privilege on a database is a chicken-and-egg problem: the grant + // statement itself invokes get_database, which already requires the select privilege. + System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname, "false"); + } + @Override protected void tearDown() throws Exception { super.tearDown(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java index d98f599..299812f 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java @@ -88,6 +88,7 @@ public void testMultipleAuthorizationListners() throws Exception { // verify that the actual action also went through Database db = msc.getDatabase(dbName); + listSize += 2; // one read-database auth call for each of the two authorization providers Database dbFromEvent = (Database)assertAndExtractSingleObjectFromEvent(listSize, authCalls, DummyHiveMetastoreAuthorizationProvider.AuthCallContextType.DB); validateCreateDb(db,dbFromEvent); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java index 6cf8565..dfaa080 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationDrops.java @@ -18,88 +18,19 @@ package org.apache.hadoop.hive.ql.security; -import java.net.URI; -import java.util.ArrayList; -import java.util.List; - -import junit.framework.TestCase; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hive.cli.CliSessionState; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; -import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; -import org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.security.UserGroupInformation; +import
org.junit.Assert; +import org.junit.Test; /** * Test cases focusing on drop table permission checks */ -public class TestStorageBasedMetastoreAuthorizationDrops extends TestCase{ - protected HiveConf clientHiveConf; - protected HiveMetaStoreClient msc; - protected Driver driver; - protected UserGroupInformation ugi; - private static int objNum = 0; - - protected String getAuthorizationProvider(){ - return StorageBasedAuthorizationProvider.class.getName(); - } - - protected HiveConf createHiveConf() throws Exception { - return new HiveConf(this.getClass()); - } - - @Override - protected void setUp() throws Exception { - - super.setUp(); - - int port = MetaStoreUtils.findFreePort(); - - // Turn on metastore-side authorization - System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, - AuthorizationPreEventListener.class.getName()); - System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER.varname, - getAuthorizationProvider()); - System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname, - InjectableDummyAuthenticator.class.getName()); - - MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); - - clientHiveConf = createHiveConf(); - - // Turn off client-side authorization - clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false); - - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - - ugi = ShimLoader.getHadoopShims().getUGIForConf(clientHiveConf); - - SessionState.start(new CliSessionState(clientHiveConf)); - msc = new HiveMetaStoreClient(clientHiveConf, null); - driver = new Driver(clientHiveConf); - - setupFakeUser(); - InjectableDummyAuthenticator.injectMode(false); - } - +public class TestStorageBasedMetastoreAuthorizationDrops extends StorageBasedMetastoreTestBase { + @Test public void testDropDatabase() throws Exception { dropDatabaseByOtherUser("-rwxrwxrwx", 0); dropDatabaseByOtherUser("-rwxrwxrwt", 1); @@ -111,12 +42,12 @@ public void testDropDatabase() throws Exception { * @param expectedRet - expected return code for drop by other user * @throws Exception */ - private void dropDatabaseByOtherUser(String perm, int expectedRet) throws Exception { + public void dropDatabaseByOtherUser(String perm, int expectedRet) throws Exception { String dbName = getTestDbName(); setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), perm); CommandProcessorResponse resp = driver.run("create database " + dbName); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); Database db = msc.getDatabase(dbName); validateCreateDb(db, dbName); @@ -124,10 +55,11 @@ private void dropDatabaseByOtherUser(String perm, int expectedRet) throws Except resp = driver.run("drop database " + dbName); - assertEquals(expectedRet, resp.getResponseCode()); + Assert.assertEquals(expectedRet, resp.getResponseCode()); } + @Test public void testDropTable() throws Exception { dropTableByOtherUser("-rwxrwxrwx", 0); dropTableByOtherUser("-rwxrwxrwt", 1); @@ -138,13 +70,13 @@ public void testDropTable() throws Exception { * @param expectedRet expected return code on drop table * @throws Exception */ - private void dropTableByOtherUser(String 
perm, int expectedRet) throws Exception { + public void dropTableByOtherUser(String perm, int expectedRet) throws Exception { String dbName = getTestDbName(); String tblName = getTestTableName(); setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx"); CommandProcessorResponse resp = driver.run("create database " + dbName); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); Database db = msc.getDatabase(dbName); validateCreateDb(db, dbName); @@ -152,18 +84,19 @@ private void dropTableByOtherUser(String perm, int expectedRet) throws Exception String dbDotTable = dbName + "." + tblName; resp = driver.run("create table " + dbDotTable + "(i int)"); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); InjectableDummyAuthenticator.injectMode(true); resp = driver.run("drop table " + dbDotTable); - assertEquals(expectedRet, resp.getResponseCode()); + Assert.assertEquals(expectedRet, resp.getResponseCode()); } /** * Drop view should not be blocked by SBA. View will not have any location to drop. * @throws Exception */ + @Test public void testDropView() throws Exception { String dbName = getTestDbName(); String tblName = getTestTableName(); @@ -171,7 +104,7 @@ public void testDropView() throws Exception { setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx"); CommandProcessorResponse resp = driver.run("create database " + dbName); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); Database db = msc.getDatabase(dbName); validateCreateDb(db, dbName); @@ -179,20 +112,20 @@ public void testDropView() throws Exception { String dbDotTable = dbName + "." + tblName; resp = driver.run("create table " + dbDotTable + "(i int)"); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); String dbDotView = dbName + "." + viewName; resp = driver.run("create view " + dbDotView + " as select * from " + dbDotTable); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); resp = driver.run("drop view " + dbDotView); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); resp = driver.run("drop table " + dbDotTable); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); } - + @Test public void testDropPartition() throws Exception { dropPartitionByOtherUser("-rwxrwxrwx", 0); dropPartitionByOtherUser("-rwxrwxrwt", 1); @@ -203,70 +136,29 @@ public void testDropPartition() throws Exception { * @param expectedRet expected return code * @throws Exception */ - private void dropPartitionByOtherUser(String perm, int expectedRet) throws Exception { + public void dropPartitionByOtherUser(String perm, int expectedRet) throws Exception { String dbName = getTestDbName(); String tblName = getTestTableName(); setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx"); CommandProcessorResponse resp = driver.run("create database " + dbName); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); Database db = msc.getDatabase(dbName); validateCreateDb(db, dbName); setPermissions(db.getLocationUri(), "-rwxrwxrwx"); String dbDotTable = dbName + "." 
+ tblName; resp = driver.run("create table " + dbDotTable + "(i int) partitioned by (b string)"); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); Table tab = msc.getTable(dbName, tblName); setPermissions(tab.getSd().getLocation(), perm); resp = driver.run("alter table " + dbDotTable + " add partition (b='2011')"); - assertEquals(0, resp.getResponseCode()); + Assert.assertEquals(0, resp.getResponseCode()); InjectableDummyAuthenticator.injectMode(true); resp = driver.run("alter table " + dbDotTable + " drop partition (b='2011')"); - assertEquals(expectedRet, resp.getResponseCode()); - } - - private void setupFakeUser() { - String fakeUser = "mal"; - List fakeGroupNames = new ArrayList(); - fakeGroupNames.add("groupygroup"); - - InjectableDummyAuthenticator.injectUserName(fakeUser); - InjectableDummyAuthenticator.injectGroupNames(fakeGroupNames); - } - - private String setupUser() { - return ugi.getUserName(); - } - - private String getTestTableName() { - return this.getClass().getSimpleName() + "tab" + ++objNum; + Assert.assertEquals(expectedRet, resp.getResponseCode()); } - private String getTestDbName() { - return this.getClass().getSimpleName() + "db" + ++objNum; - } - - @Override - protected void tearDown() throws Exception { - super.tearDown(); - InjectableDummyAuthenticator.injectMode(false); - } - - protected void setPermissions(String locn, String permissions) throws Exception { - FileSystem fs = FileSystem.get(new URI(locn), clientHiveConf); - fs.setPermission(new Path(locn), FsPermission.valueOf(permissions)); - } - - private void validateCreateDb(Database expectedDb, String dbName) { - assertEquals(expectedDb.getName().toLowerCase(), dbName.toLowerCase()); - } - - private void validateCreateTable(Table expectedTable, String tblName, String dbName) { - assertNotNull(expectedTable); - assertEquals(expectedTable.getTableName().toLowerCase(),tblName.toLowerCase()); - assertEquals(expectedTable.getDbName().toLowerCase(),dbName.toLowerCase()); - } } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java index dc08271..78ff780 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProvider.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider; @@ -102,4 +103,10 @@ protected String getTestTableName(){ return super.getTestTableName() + "_SBAP"; } + @Override + protected void setupMetaStoreReadAuthorization() { + // enable read authorization in metastore + System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname, "true"); + } + } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java new file mode 100644 index 0000000..6f45a59 --- /dev/null +++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.security; + +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.ql.CommandNeedRetryException; +import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test cases focusing on metastore read permission checks + */ +public class TestStorageBasedMetastoreAuthorizationReads extends StorageBasedMetastoreTestBase { + + @Test + public void testReadTableSuccess() throws Exception { + readTableByOtherUser("-rwxrwxrwx", true); + } + + @Test + public void testReadTableFailure() throws Exception { + readTableByOtherUser("-rwxrwx---", false); + } + + /** + * @param perm dir permission for table dir + * @param isSuccess whether the commands are expected to succeed + * @throws Exception + */ + private void readTableByOtherUser(String perm, boolean isSuccess) throws Exception { + String dbName = getTestDbName(); + String tblName = getTestTableName(); + setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), "-rwxrwxrwx"); + + CommandProcessorResponse resp = driver.run("create database " + dbName); + Assert.assertEquals(0, resp.getResponseCode()); + Database db = msc.getDatabase(dbName); + validateCreateDb(db, dbName); + + setPermissions(db.getLocationUri(), "-rwxrwxrwx"); + + String dbDotTable = dbName + "." + tblName; + resp = driver.run("create table " + dbDotTable + "(i int) partitioned by (date string)"); + Assert.assertEquals(0, resp.getResponseCode()); + Table tab = msc.getTable(dbName, tblName); + setPermissions(tab.getSd().getLocation(), perm); + + InjectableDummyAuthenticator.injectMode(true); + + testCmd(driver, "DESCRIBE " + dbDotTable, isSuccess); + testCmd(driver, "DESCRIBE EXTENDED " + dbDotTable, isSuccess); + testCmd(driver, "SHOW PARTITIONS " + dbDotTable, isSuccess); + testCmd(driver, "SHOW COLUMNS IN " + tblName + " IN " + dbName, isSuccess); + testCmd(driver, "use " + dbName, true); + testCmd(driver, "SHOW TABLE EXTENDED LIKE " + tblName, isSuccess); + + }
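NOTE: every command issued through testCmd above is expected to pass or fail purely on the table directory's permission bits, because the injected user "mal" from the base class matches neither the owner nor the group and is therefore evaluated against the "other" bits. A self-contained check of that premise, using the same Hadoop permission API the base class's setPermissions relies on (the demo class itself is illustrative):

    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class PermBitsDemo {
      public static void main(String[] args) {
        FsPermission open = FsPermission.valueOf("-rwxrwxrwx");
        FsPermission closed = FsPermission.valueOf("-rwxrwx---");
        // "other" users may read the open dir, but not the closed one
        System.out.println(open.getOtherAction().implies(FsAction.READ));   // true
        System.out.println(closed.getOtherAction().implies(FsAction.READ)); // false
      }
    }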
+ + @Test + public void testReadDbSuccess() throws Exception { + readDbByOtherUser("-rwxrwxrwx", true); + } + + @Test + public void testReadDbFailure() throws Exception { + readDbByOtherUser("-rwxrwx---", false); + } + + + /** + * @param perm dir permission for database dir + * @param isSuccess whether the commands are expected to succeed + * @throws Exception + */ + private void readDbByOtherUser(String perm, boolean isSuccess) throws Exception { + String dbName = getTestDbName(); + setPermissions(clientHiveConf.getVar(ConfVars.METASTOREWAREHOUSE), perm); + + CommandProcessorResponse resp = driver.run("create database " + dbName); + Assert.assertEquals(0, resp.getResponseCode()); + Database db = msc.getDatabase(dbName); + validateCreateDb(db, dbName); + setPermissions(db.getLocationUri(), perm); + + InjectableDummyAuthenticator.injectMode(true); + + testCmd(driver, "DESCRIBE DATABASE " + dbName, isSuccess); + testCmd(driver, "DESCRIBE DATABASE EXTENDED " + dbName, isSuccess); + testCmd(driver, "SHOW TABLES IN " + dbName, isSuccess); + driver.run("use " + dbName); + testCmd(driver, "SHOW TABLES ", isSuccess); + + } + + private void testCmd(Driver driver, String cmd, boolean isSuccess) + throws CommandNeedRetryException { + CommandProcessorResponse resp = driver.run(cmd); + Assert.assertEquals(isSuccess, resp.getResponseCode() == 0); + } + +} diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java index 3c99068..79cf58b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java @@ -98,7 +98,7 @@ public static void beforeTest() throws Exception { runCmd("create database " + dbName); // Need a separate table for ACID testing since it has to be bucketed and it has to be Acid runCmd("create table " + acidTableName + " (i int, j int) clustered by (i) into 2 buckets " + - "stored as orc"); + "stored as orc TBLPROPERTIES ('transactional'='true')"); } private static void runCmd(String cmd) throws CommandNeedRetryException { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestHadoop20SAuthBridge.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestHadoop20SAuthBridge.java index 7ac7ebc..f40b7da 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestHadoop20SAuthBridge.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/thrift/TestHadoop20SAuthBridge.java @@ -47,6 +47,7 @@ import org.apache.hadoop.security.UserGroupInformation; import
org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.DefaultImpersonationProvider; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; @@ -129,7 +130,7 @@ private void configureSuperUserIPAddresses(Configuration conf, } builder.append("127.0.1.1,"); builder.append(InetAddress.getLocalHost().getCanonicalHostName()); - conf.setStrings(ProxyUsers.getProxySuperuserIpConfKey(superUserShortName), + conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(superUserShortName), builder.toString()); } @@ -292,7 +293,7 @@ public String run() throws Exception { private void setGroupsInConf(String[] groupNames, String proxyUserName) throws IOException { conf.set( - ProxyUsers.getProxySuperuserGroupConfKey(proxyUserName), + DefaultImpersonationProvider.getProxySuperuserGroupConfKey(proxyUserName), StringUtils.join(",", Arrays.asList(groupNames))); configureSuperUserIPAddresses(conf, proxyUserName); ProxyUsers.refreshSuperUserGroupsConfiguration(conf); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java index 1e66542..6561743 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java @@ -477,4 +477,31 @@ public void testEmbeddedBeelineConnection() throws Throwable{ final String EXPECTED_PATTERN = "embedded_table"; testScriptFile(TEST_NAME, SCRIPT_TEXT, EXPECTED_PATTERN, true, argList); } + + /** + * Test Beeline could show the query progress for time-consuming query. + * @throws Throwable + */ + @Test + public void testQueryProgress() throws Throwable { + final String TEST_NAME = "testQueryProgress"; + final String SCRIPT_TEXT = "set hive.support.concurrency = false;\n" + + "select count(*) from " + tableName + ";\n"; + final String EXPECTED_PATTERN = "Parsing command"; + testScriptFile(TEST_NAME, SCRIPT_TEXT, EXPECTED_PATTERN, true, getBaseArgs(JDBC_URL)); + } + + /** + * Test Beeline will hide the query progress when silent option is set. 
+ + /** + * Test that Beeline hides the query progress when the silent option is set. + * @throws Throwable + */ + @Test + public void testQueryProgressHidden() throws Throwable { + final String TEST_NAME = "testQueryProgressHidden"; + final String SCRIPT_TEXT = "set hive.support.concurrency = false;\n" + + "!set silent true\n" + + "select count(*) from " + tableName + ";\n"; + final String EXPECTED_PATTERN = "Parsing command"; + testScriptFile(TEST_NAME, SCRIPT_TEXT, EXPECTED_PATTERN, false, getBaseArgs(JDBC_URL)); + } } diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java index daf8e9e..15ff1f4 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java @@ -1318,6 +1318,42 @@ public void testDatabaseMetaData() throws SQLException { } @Test + public void testResultSetColumnNameCaseInsensitive() throws SQLException { + Statement stmt = con.createStatement(); + ResultSet res; + + res = stmt.executeQuery("select c1 from " + dataTypeTableName + " limit 1"); + try { + int count = 0; + while (res.next()) { + res.findColumn("c1"); + res.findColumn("C1"); + count++; + } + assertEquals(1, count); + } catch (Exception e) { + String msg = "Unexpected exception: " + e; + LOG.info(msg, e); + fail(msg); + } + + res = stmt.executeQuery("select c1 C1 from " + dataTypeTableName + " limit 1"); + try { + int count = 0; + while (res.next()) { + res.findColumn("c1"); + res.findColumn("C1"); + count++; + } + assertEquals(1, count); + } catch (Exception e) { + String msg = "Unexpected exception: " + e; + LOG.info(msg, e); + fail(msg); + } + } + + @Test public void testResultSetMetaData() throws SQLException { Statement stmt = con.createStatement(); @@ -2130,4 +2166,82 @@ public void testNonAsciiReturnValues() throws Exception { } stmt.close(); } + + /** + * Test the query log retrieval methods of the JDBC driver. + * @throws Exception + */ + @Test + public void testGetQueryLog() throws Exception { + // Prepare + String[] expectedLogs = { + "Parsing command", + "Parse Completed", + "Starting Semantic Analysis", + "Semantic Analysis Completed", + "Starting command" + }; + String sql = "select count(*) from " + tableName; + + // Verify the fetched log (from the beginning of log file) + HiveStatement stmt = (HiveStatement)con.createStatement(); + assertNotNull("Statement is null", stmt); + stmt.executeQuery(sql); + List logs = stmt.getQueryLog(false, 10000); + stmt.close(); + verifyFetchedLog(logs, expectedLogs); + + // Verify the fetched log (incrementally) + final HiveStatement statement = (HiveStatement)con.createStatement(); + assertNotNull("Statement is null", statement); + statement.setFetchSize(10000); + final List incrementalLogs = new ArrayList(); + + Runnable logThread = new Runnable() { + @Override + public void run() { + while (statement.hasMoreLogs()) { + try { + incrementalLogs.addAll(statement.getQueryLog()); + Thread.sleep(500); + } catch (SQLException e) { + LOG.error("Failed getQueryLog. Error message: " + e.getMessage()); + fail("error in getting log thread"); + } catch (InterruptedException e) { + LOG.error("Getting log thread is interrupted. 
Error message: " + e.getMessage()); + fail("error in getting log thread"); + } + } + } + }; + + Thread thread = new Thread(logThread); + thread.setDaemon(true); + thread.start(); + statement.executeQuery(sql); + thread.interrupt(); + thread.join(10000); + // fetch remaining logs + List remainingLogs; + do { + remainingLogs = statement.getQueryLog(); + incrementalLogs.addAll(remainingLogs); + } while (remainingLogs.size() > 0); + statement.close(); + + verifyFetchedLog(incrementalLogs, expectedLogs); + } + + private void verifyFetchedLog(List logs, String[] expectedLogs) { + StringBuilder stringBuilder = new StringBuilder(); + + for (String log : logs) { + stringBuilder.append(log); + } + + String accumulatedLogs = stringBuilder.toString(); + for (String expectedLog : expectedLogs) { + assertTrue(accumulatedLogs.contains(expectedLog)); + } + } } diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java index d03dba4..0bb3c0a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java @@ -120,9 +120,10 @@ private void verifyContextContents(final String cmd, String ctxCmd) throws SQLEx HiveAuthzContext context = contextCapturer.getValue(); assertEquals("Command ", ctxCmd, context.getCommandString()); - assertTrue("ip address pattern check", context.getIpAddress().contains(".")); + assertTrue("ip address pattern check", context.getIpAddress().matches("[.:a-fA-F0-9]+")); // ip address size check - check for something better than non zero assertTrue("ip address size check", context.getIpAddress().length() > 7); + } private Connection getConnection(String userName) throws SQLException { diff --git a/itests/qtest-spark/pom.xml b/itests/qtest-spark/pom.xml index 0e0cee0..338cad3 100644 --- a/itests/qtest-spark/pom.xml +++ b/itests/qtest-spark/pom.xml @@ -53,6 +53,38 @@ test + org.apache.tez + tez-api + ${tez.version} + true + + + org.apache.hadoop + hadoop-common + + + org.apache.hadoop + hadoop-mapreduce-client-core + + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + + + org.apache.hadoop + hadoop-mapreduce-client-common + + + org.apache.hadoop + hadoop-hdfs + + + org.apache.hadoop + hadoop-yarn-client + + + + org.eclipse.jetty jetty-util ${spark.jetty.version} diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index 73f1c44..55aa9f0 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -55,6 +55,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\ bucket2.q,\ bucket3.q,\ bucket4.q,\ + cbo_correctness.q,\ correlationoptimizer1.q,\ count.q,\ create_merge_compressed.q,\ @@ -74,6 +75,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\ disable_merge_for_bucketing.q,\ dynpart_sort_opt_vectorization.q,\ dynpart_sort_optimization.q,\ + dynpart_sort_optimization2.q,\ enforce_order.q,\ filter_join_breaktask.q,\ filter_join_breaktask2.q,\ @@ -116,6 +118,7 @@ minitez.query.files.shared=alter_merge_2_orc.q,\ orc_merge7.q,\ orc_merge_incompat1.q,\ orc_merge_incompat2.q,\ + orc_vectorization_ppd.q,\ parallel.q,\ ptf.q,\ sample1.q,\ @@ -152,24 +155,73 @@ minitez.query.files.shared=alter_merge_2_orc.q,\ update_where_non_partitioned.q,\ update_where_partitioned.q,\ 
update_two_cols.q,\ + vector_between_in.q,\ vector_cast_constant.q,\ + vector_char_4.q,\ + vector_char_simple.q,\ + vector_count_distinct.q,\ vector_data_types.q,\ vector_decimal_aggregate.q,\ + vector_distinct_2.q,\ + vector_elt.q,\ + vector_groupby_3.q,\ vector_left_outer_join.q,\ vector_mapjoin_reduce.q,\ + vector_non_string_partition.q,\ + vector_orderby_5.q,\ vector_string_concat.q,\ + vector_varchar_4.q,\ + vector_varchar_simple.q,\ + vectorization_0.q,\ + vectorization_1.q,\ + vectorization_10.q,\ + vectorization_11.q,\ vectorization_12.q,\ vectorization_13.q,\ vectorization_14.q,\ vectorization_15.q,\ + vectorization_16.q,\ + vectorization_2.q,\ + vectorization_3.q,\ + vectorization_4.q,\ + vectorization_5.q,\ + vectorization_6.q,\ vectorization_9.q,\ + vectorization_decimal_date.q,\ + vectorization_div0.q,\ + vectorization_nested_udf.q,\ + vectorization_not.q,\ + vectorization_part.q,\ vectorization_part_project.q,\ + vectorization_pushdown.q,\ vectorization_short_regress.q,\ + vectorized_bucketmapjoin1.q,\ + vectorized_case.q,\ + vectorized_context.q,\ vectorized_mapjoin.q,\ + vectorized_math_funcs.q,\ vectorized_nested_mapjoin.q,\ + vectorized_parquet.q,\ vectorized_ptf.q,\ + vectorized_rcfile_columnar.q,\ vectorized_shufflejoin.q,\ - vectorized_timestamp_funcs.q + vectorized_string_funcs.q,\ + vectorized_timestamp_funcs.q,\ + auto_sortmerge_join_1.q,\ + auto_sortmerge_join_10.q,\ + auto_sortmerge_join_11.q,\ + auto_sortmerge_join_12.q,\ + auto_sortmerge_join_13.q,\ + auto_sortmerge_join_14.q,\ + auto_sortmerge_join_15.q,\ + auto_sortmerge_join_16.q,\ + auto_sortmerge_join_2.q,\ + auto_sortmerge_join_3.q,\ + auto_sortmerge_join_4.q,\ + auto_sortmerge_join_5.q,\ + auto_sortmerge_join_7.q,\ + auto_sortmerge_join_8.q,\ + auto_sortmerge_join_9.q minitez.query.files=bucket_map_join_tez1.q,\ bucket_map_join_tez2.q,\ @@ -186,7 +238,11 @@ minitez.query.files=bucket_map_join_tez1.q,\ tez_joins_explain.q,\ tez_schema_evolution.q,\ tez_union.q,\ - tez_union_decimal.q + tez_union_decimal.q,\ + tez_union_group_by.q,\ + tez_smb_main.q,\ + tez_smb_1.q,\ + vectorized_dynamic_partition_pruning.q beeline.positive.exclude=add_part_exist.q,\ alter1.q,\ @@ -342,6 +398,7 @@ beeline.positive.exclude=add_part_exist.q,\ minimr.query.negative.files=cluster_tasklog_retrieval.q,\ file_with_header_footer_negative.q,\ + local_mapred_error_cache.q,\ mapreduce_stack_trace.q,\ mapreduce_stack_trace_hadoop20.q,\ mapreduce_stack_trace_turnoff.q,\ diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index 4a60f52..f5e35b8 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -690,7 +690,10 @@ public void init() throws Exception { // conf.logVars(System.out); // System.out.flush(); + String execEngine = conf.get("hive.execution.engine"); + conf.set("hive.execution.engine", "mr"); SessionState.start(conf); + conf.set("hive.execution.engine", execEngine); db = Hive.get(conf); fs = FileSystem.get(conf); drv = new Driver(conf); @@ -771,6 +774,8 @@ private CliSessionState startSessionState() HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER, "org.apache.hadoop.hive.ql.security.DummyAuthenticator"); + String execEngine = conf.get("hive.execution.engine"); + conf.set("hive.execution.engine", "mr"); CliSessionState ss = new CliSessionState(conf); assert ss != null; ss.in = System.in; @@ -788,6 +793,7 @@ private 
CliSessionState startSessionState() isSessionStateStarted = true; + conf.set("hive.execution.engine", execEngine); return ss; } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDAFTestMax.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDAFTestMax.java index 7b7fd71..f9842fa 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDAFTestMax.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDAFTestMax.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.udf; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDAF; import org.apache.hadoop.hive.ql.exec.UDAFEvaluator; import org.apache.hadoop.hive.serde2.io.DoubleWritable; @@ -32,6 +33,8 @@ * UDAFTestMax. * */ +@Description(name = "test_max", +value = "_FUNC_(col) - UDF to report Max Value") public class UDAFTestMax extends UDAF { /** diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFFileLookup.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFFileLookup.java index 61c7e0c..f804764 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFFileLookup.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFFileLookup.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDF; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; @@ -34,6 +35,8 @@ /** * A UDF for testing, which does key/value lookup from a file */ +@Description(name = "lookup", +value = "_FUNC_(col) - UDF for key/value lookup from a file") public class UDFFileLookup extends UDF { static Log LOG = LogFactory.getLog(UDFFileLookup.class); diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestErrorOnFalse.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestErrorOnFalse.java index 66a30ab..382fa44 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestErrorOnFalse.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestErrorOnFalse.java @@ -18,11 +18,14 @@ package org.apache.hadoop.hive.ql.udf; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDF; /** * A UDF for testing, which throws a RuntimeException if the expression evaluates to false. */ +@Description(name = "test_error", +value = "_FUNC_(col) - UDF throws RuntimeException if expression evaluates to false") public class UDFTestErrorOnFalse extends UDF { public int evaluate(Boolean b) { diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestLength.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestLength.java index 9e75c51..da3ea38 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestLength.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestLength.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.udf; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDF; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; @@ -25,6 +26,8 @@ /** * A UDF for testing, which evaluates the length of a string. 
*/ +@Description(name = "testlength", +value = "_FUNC_(col) - UDF evaluates the length of the string") public class UDFTestLength extends UDF { IntWritable result = new IntWritable(); diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestLength2.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestLength2.java index b1aab45..ac083f8 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestLength2.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/UDFTestLength2.java @@ -18,12 +18,15 @@ package org.apache.hadoop.hive.ql.udf; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDF; /** * A UDF for testing, which evaluates the length of a string. This UDF uses Java * Primitive classes for parameters. */ +@Description(name = "testlength2", +value = "_FUNC_(col) - UDF evaluates the length of the string and returns value as Java Integer") public class UDFTestLength2 extends UDF { public Integer evaluate(String s) { diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/DummyContextUDF.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/DummyContextUDF.java index d3b525e..5fa63f1 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/DummyContextUDF.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/DummyContextUDF.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.MapredContext; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -26,7 +27,8 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.mapred.Counters; import org.apache.hadoop.mapred.Reporter; - +@Description(name = "counter", +value = "_FUNC_(col) - UDF to report MR counter values") public class DummyContextUDF extends GenericUDF { private MapredContext context; diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestGetJavaBoolean.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestGetJavaBoolean.java index 4ec7431..bf6c7c2 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestGetJavaBoolean.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestGetJavaBoolean.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -27,6 +28,8 @@ /** * A test GenericUDF to return native Java's boolean type */ +@Description(name = "test_udf_get_java_boolean", +value = "_FUNC_(str) - GenericUDF to return native Java's boolean type") public class GenericUDFTestGetJavaBoolean extends GenericUDF { ObjectInspector[] argumentOIs; diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestGetJavaString.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestGetJavaString.java index ead45ae..914cebf 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestGetJavaString.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestGetJavaString.java @@ -18,6 +18,7 @@ 
package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -27,6 +28,8 @@ /** * A test GenericUDF to return native Java's string type */ +@Description(name = "test_udf_get_java_string", +value = "_FUNC_(str) - GenericUDF to return native Java's string type") public class GenericUDFTestGetJavaString extends GenericUDF { ObjectInspector[] argumentOIs; diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestTranslate.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestTranslate.java index dedf91d..0ab8d0b 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestTranslate.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTestTranslate.java @@ -21,6 +21,7 @@ import java.util.HashSet; import java.util.Set; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; @@ -34,6 +35,8 @@ /** * Mimics oracle's function translate(str1, str2, str3). */ +@Description(name = "test_translate", +value = "_FUNC_(str1, str2, str3) - Mimics oracle's function translate(str1, str2, str3)") public class GenericUDFTestTranslate extends GenericUDF { private transient ObjectInspector[] argumentOIs; diff --git a/jdbc/pom.xml b/jdbc/pom.xml index 253b840..215dc86 100644 --- a/jdbc/pom.xml +++ b/jdbc/pom.xml @@ -41,8 +41,14 @@ org.apache.hive - hive-metastore + hive-service ${project.version} + + + org.apache.hive + hive-exec + + org.apache.hive @@ -51,12 +57,12 @@ org.apache.hive - hive-service + hive-metastore ${project.version} org.apache.hive - hive-exec + hive-shims ${project.version} @@ -135,8 +141,39 @@ true true ${hive.jdbc.driver.classifier} + + + org.apache.hive.shims:hive-shims-common + + org/apache/hadoop/hive/shims/* + org/apache/hadoop/hive/thrift/* + + + + org.apache.hive.shims:hive-shims-common-secure + + org/apache/hadoop/hive/thrift/* + org/apache/hadoop/hive/thrift/client/* + + + + org.apache.hive.shims:hive-shims-0.23 + + org/apache/hadoop/hive/thrift/* + + + + *:* + + META-INF/*.SF + META-INF/*.DSA + META-INF/*.RSA + + + + org.apache.commons:commons-compress org.apache.hadoop:* org.apache.hive:hive-ant org.apache.ant:* @@ -150,23 +187,16 @@ org.tukaani:* org.iq80.snappy:* org.apache.velocity:* + net.sf.jpam:* + org.apache.avro:* + net.sf.opencsv:* + org.antlr:* - - - - *:* - - META-INF/*.SF - META-INF/*.DSA - META-INF/*.RSA - - - + - diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDriver.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDriver.java index 68c0788..68f1d15 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDriver.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDriver.java @@ -102,8 +102,9 @@ public boolean acceptsURL(String url) throws SQLException { return Pattern.matches(URL_PREFIX + ".*", url); } + @Override public Connection connect(String url, Properties info) throws SQLException { - return new HiveConnection(url, info); + return acceptsURL(url) ? 
new HiveConnection(url, info) : null; } /** diff --git a/jdbc/src/java/org/apache/hive/jdbc/ClosedOrCancelledStatementException.java b/jdbc/src/java/org/apache/hive/jdbc/ClosedOrCancelledStatementException.java new file mode 100644 index 0000000..9880208 --- /dev/null +++ b/jdbc/src/java/org/apache/hive/jdbc/ClosedOrCancelledStatementException.java @@ -0,0 +1,29 @@ +/** + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. See accompanying LICENSE file. + */ + +package org.apache.hive.jdbc; + +import java.sql.SQLException; + +public class ClosedOrCancelledStatementException extends SQLException { + + private static final long serialVersionUID = 0; + + /** + * @param msg (exception message) + */ + public ClosedOrCancelledStatementException(String msg) { + super(msg); + } +} diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java b/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java index ec5e555..8cbf9e7 100644 --- a/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java +++ b/jdbc/src/java/org/apache/hive/jdbc/HiveBaseResultSet.java @@ -58,6 +58,7 @@ protected boolean wasNull = false; protected Object[] row; protected List<String> columnNames; + protected List<String> normalizedColumnNames; protected List<String> columnTypes; protected List<JdbcColumnAttributes> columnAttributes; @@ -84,7 +85,7 @@ public void deleteRow() throws SQLException { } public int findColumn(String columnName) throws SQLException { - int columnIndex = columnNames.indexOf(columnName); + int columnIndex = normalizedColumnNames.indexOf(columnName.toLowerCase()); if (columnIndex==-1) { throw new SQLException(); } else { diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveMetaDataResultSet.java b/jdbc/src/java/org/apache/hive/jdbc/HiveMetaDataResultSet.java index d1ac109..c1bd898 100644 --- a/jdbc/src/java/org/apache/hive/jdbc/HiveMetaDataResultSet.java +++ b/jdbc/src/java/org/apache/hive/jdbc/HiveMetaDataResultSet.java @@ -36,8 +36,13 @@ public HiveMetaDataResultSet(final List columnNames } if (columnNames!=null) { this.columnNames = new ArrayList<String>(columnNames); + this.normalizedColumnNames = new ArrayList<String>(); + for (String colName : columnNames) { + this.normalizedColumnNames.add(colName.toLowerCase()); + } } else { this.columnNames = new ArrayList<String>(); + this.normalizedColumnNames = new ArrayList<String>(); } if (columnTypes!=null) { this.columnTypes = new ArrayList<String>(columnTypes);
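The normalization above is what makes ResultSet.findColumn case-insensitive, which the new TestJdbcDriver2 test exercises. A small self-contained sketch (table and column names are illustrative):

import java.sql.ResultSet;
import java.sql.Statement;

public class FindColumnSketch {
  static void check(Statement stmt) throws Exception {
    ResultSet rs = stmt.executeQuery("select c1 from some_table limit 1");
    while (rs.next()) {
      // Both lookups now resolve to the same index because the stored names
      // and the argument are lower-cased before comparison; previously the
      // second call threw SQLException.
      int byLower = rs.findColumn("c1");
      int byUpper = rs.findColumn("C1");
      assert byLower == byUpper;
    }
    rs.close();
  }
}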
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java b/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java index 86bc580..7bc09cd 100644 --- a/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java +++ b/jdbc/src/java/org/apache/hive/jdbc/HiveQueryResultSet.java @@ -28,6 +28,7 @@ import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -75,6 +76,7 @@ private boolean fetchFirst = false; private final TProtocolVersion protocol; + private ReentrantLock transportLock; public static class Builder { @@ -98,6 +100,7 @@ private int fetchSize = 50; private boolean emptyResultSet = false; private boolean isScrollable = false; + private ReentrantLock transportLock = null; public Builder(Statement statement) throws SQLException { this.statement = statement; @@ -166,6 +169,11 @@ public Builder setScrollable(boolean setScrollable) { return this; } + public Builder setTransportLock(ReentrantLock transportLock) { + this.transportLock = transportLock; + return this; + } + public HiveQueryResultSet build() throws SQLException { return new HiveQueryResultSet(this); } @@ -181,7 +189,9 @@ protected HiveQueryResultSet(Builder builder) throws SQLException { this.stmtHandle = builder.stmtHandle; this.sessHandle = builder.sessHandle; this.fetchSize = builder.fetchSize; + this.transportLock = builder.transportLock; columnNames = new ArrayList<String>(); + normalizedColumnNames = new ArrayList<String>(); columnTypes = new ArrayList<String>(); columnAttributes = new ArrayList<JdbcColumnAttributes>(); if (builder.retrieveSchema) { @@ -239,7 +249,17 @@ private void retrieveSchema() throws SQLException { try { TGetResultSetMetadataReq metadataReq = new TGetResultSetMetadataReq(stmtHandle); // TODO need session handle - TGetResultSetMetadataResp metadataResp = client.GetResultSetMetadata(metadataReq); + TGetResultSetMetadataResp metadataResp; + if (transportLock == null) { + metadataResp = client.GetResultSetMetadata(metadataReq); + } else { + transportLock.lock(); + try { + metadataResp = client.GetResultSetMetadata(metadataReq); + } finally { + transportLock.unlock(); + } + } Utils.verifySuccess(metadataResp.getStatus()); StringBuilder namesSb = new StringBuilder(); @@ -260,6 +280,7 @@ private void retrieveSchema() throws SQLException { } String columnName = columns.get(pos).getColumnName(); columnNames.add(columnName); + normalizedColumnNames.add(columnName.toLowerCase()); TPrimitiveTypeEntry primitiveTypeEntry = columns.get(pos).getTypeDesc().getTypes().get(0).getPrimitiveEntry(); String columnTypeName = TYPE_NAMES.get(primitiveTypeEntry.getType()); @@ -284,6 +305,10 @@ private void setSchema(List colNames, List colTypes, columnNames.addAll(colNames); columnTypes.addAll(colTypes); columnAttributes.addAll(colAttributes); + + for (String colName : colNames) { + normalizedColumnNames.add(colName.toLowerCase()); + } } @Override @@ -326,7 +351,17 @@ public boolean next() throws SQLException { if (fetchedRows == null || !fetchedRowsItr.hasNext()) { TFetchResultsReq fetchReq = new TFetchResultsReq(stmtHandle, orientation, fetchSize); - TFetchResultsResp fetchResp = client.FetchResults(fetchReq); + TFetchResultsResp fetchResp; + if (transportLock == null) { + fetchResp = client.FetchResults(fetchReq); + } else { + transportLock.lock(); + try { + fetchResp = client.FetchResults(fetchReq); + } finally { + transportLock.unlock(); + } + } Utils.verifySuccessWithInfo(fetchResp.getStatus()); TRowSet results = fetchResp.getResults(); diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java index 2cbf58c..d8e33d3 100644 --- a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java +++ b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java @@ -23,10 +23,14 @@ import java.sql.SQLException; import java.sql.SQLFeatureNotSupportedException; import java.sql.SQLWarning; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.locks.ReentrantLock; +import org.apache.hive.service.cli.RowSet; +import 
org.apache.hive.service.cli.RowSetFactory; import org.apache.hive.service.cli.thrift.TCLIService; import org.apache.hive.service.cli.thrift.TCancelOperationReq; import org.apache.hive.service.cli.thrift.TCancelOperationResp; @@ -38,6 +42,9 @@ import org.apache.hive.service.cli.thrift.TGetOperationStatusResp; import org.apache.hive.service.cli.thrift.TOperationHandle; import org.apache.hive.service.cli.thrift.TSessionHandle; +import org.apache.hive.service.cli.thrift.TFetchResultsReq; +import org.apache.hive.service.cli.thrift.TFetchResultsResp; +import org.apache.hive.service.cli.thrift.TFetchOrientation; /** * HiveStatement. @@ -77,6 +84,27 @@ */ private boolean isClosed = false; + /** + * Keep state so we can fail certain calls made after cancel(). + */ + private boolean isCancelled = false; + + /** + * Keep this state so we can know whether the query in this statement is closed. + */ + private boolean isQueryClosed = false; + + /** + * Keep this state so we can know whether the query logs are being generated in HS2. + */ + private boolean isLogBeingGenerated = true; + + /** + * Keep this state so we can know whether the statement is submitted to HS2 and start execution + * successfully. + */ + private boolean isExecuteStatementFailed = false; + // A fair reentrant lock private ReentrantLock transportLock = new ReentrantLock(true); @@ -113,6 +141,9 @@ public void addBatch(String sql) throws SQLException { @Override public void cancel() throws SQLException { checkConnection("cancel"); + if (isCancelled) { + return; + } transportLock.lock(); try { @@ -128,6 +159,7 @@ public void cancel() throws SQLException { } finally { transportLock.unlock(); } + isCancelled = true; } /* @@ -167,6 +199,8 @@ void closeClientOperation() throws SQLException { } finally { transportLock.unlock(); } + isQueryClosed = true; + isExecuteStatementFailed = false; stmtHandle = null; } @@ -202,6 +236,7 @@ public boolean execute(String sql) throws SQLException { checkConnection("execute"); closeClientOperation(); + initFlags(); TExecuteStatementReq execReq = new TExecuteStatementReq(sessHandle, sql); /** @@ -218,9 +253,12 @@ public boolean execute(String sql) throws SQLException { TExecuteStatementResp execResp = client.ExecuteStatement(execReq); Utils.verifySuccessWithInfo(execResp.getStatus()); stmtHandle = execResp.getOperationHandle(); + isExecuteStatementFailed = false; } catch (SQLException eS) { + isExecuteStatementFailed = true; throw eS; } catch (Exception ex) { + isExecuteStatementFailed = true; throw new SQLException(ex.toString(), "08S01", ex); } finally { transportLock.unlock(); @@ -266,11 +304,14 @@ public boolean execute(String sql) throws SQLException { } } } catch (SQLException e) { + isLogBeingGenerated = false; throw e; } catch (Exception e) { + isLogBeingGenerated = false; throw new SQLException(e.toString(), "08S01", e); } } + isLogBeingGenerated = false; // The query should be completed by now if (!stmtHandle.isHasResultSet()) { @@ -278,7 +319,7 @@ public boolean execute(String sql) throws SQLException { } resultSet = new HiveQueryResultSet.Builder(this).setClient(client).setSessionHandle(sessHandle) .setStmtHandle(stmtHandle).setMaxRows(maxRows).setFetchSize(fetchSize) - .setScrollable(isScrollableResultset) + .setScrollable(isScrollableResultset).setTransportLock(transportLock) .build(); return true; } @@ -289,6 +330,13 @@ private void checkConnection(String action) throws SQLException { } } + private void initFlags() { + isCancelled = false; + isQueryClosed = false; + isLogBeingGenerated = 
true; + isExecuteStatementFailed = false; + } + /* * (non-Javadoc) * @@ -713,4 +761,93 @@ public boolean isWrapperFor(Class iface) throws SQLException { throw new SQLException("Cannot unwrap to " + iface); } + /** + * Check whether query execution might be producing more logs to be fetched. + * This method is a public API for usage outside of Hive, although it is not part of the + * interface java.sql.Statement. + * @return true if query execution might be producing more logs. It does not indicate if last + * log lines have been fetched by getQueryLog. + */ + public boolean hasMoreLogs() { + return isLogBeingGenerated; + } + + /** + * Get the execution logs of the given SQL statement. + * This method is a public API for usage outside of Hive, although it is not part of the + * interface java.sql.Statement. + * This method gets the incremental logs during SQL execution, and uses the fetchSize held by + * the HiveStatement object. + * @return a list of logs. It can be empty if there are no new logs to be retrieved at that time. + * @throws SQLException + * @throws ClosedOrCancelledStatementException if statement has been cancelled or closed + */ + public List<String> getQueryLog() throws SQLException, ClosedOrCancelledStatementException { + return getQueryLog(true, fetchSize); + } + + /** + * Get the execution logs of the given SQL statement. + * This method is a public API for usage outside of Hive, although it is not part of the + * interface java.sql.Statement. + * @param incremental if true, fetch the logs incrementally; if false, fetch from the beginning + * @param fetchSize the number of lines to fetch + * @return a list of logs. It can be empty if there are no new logs to be retrieved at that time. + * @throws SQLException + * @throws ClosedOrCancelledStatementException if statement has been cancelled or closed + */ + public List<String> getQueryLog(boolean incremental, int fetchSize) + throws SQLException, ClosedOrCancelledStatementException { + checkConnection("getQueryLog"); + if (isCancelled) { + throw new ClosedOrCancelledStatementException("Method getQueryLog() failed. The " + + "statement has been closed or cancelled."); + } + + List<String> logs = new ArrayList<String>(); + TFetchResultsResp tFetchResultsResp = null; + transportLock.lock(); + try { + if (stmtHandle != null) { + TFetchResultsReq tFetchResultsReq = new TFetchResultsReq(stmtHandle, + getFetchOrientation(incremental), fetchSize); + tFetchResultsReq.setFetchType((short)1); + tFetchResultsResp = client.FetchResults(tFetchResultsReq); + Utils.verifySuccessWithInfo(tFetchResultsResp.getStatus()); + } else { + if (isQueryClosed) { + throw new ClosedOrCancelledStatementException("Method getQueryLog() failed. The " + + "statement has been closed or cancelled."); + } + if (isExecuteStatementFailed) { + throw new SQLException("Method getQueryLog() failed. The stmtHandle in " + + "HiveStatement is null and the statement execution may have failed."); + } else { + return logs; + } + } + } catch (SQLException e) { + throw e; + } catch (Exception e) { + throw new SQLException("Error when getting query log: " + e, e); + } finally { + transportLock.unlock(); + } + + RowSet rowSet = RowSetFactory.create(tFetchResultsResp.getResults(), + connection.getProtocol()); + for (Object[] row : rowSet) { + logs.add((String)row[0]); + } + return logs; + } + + private TFetchOrientation getFetchOrientation(boolean incremental) { + if (incremental) { + return TFetchOrientation.FETCH_NEXT; + } else { + return TFetchOrientation.FETCH_FIRST; + } + } }
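hasMoreLogs and getQueryLog are designed to be polled from a side thread while the caller blocks in execute; a minimal consumer sketch (sleep interval and log sink are illustrative):

import java.util.List;
import org.apache.hive.jdbc.HiveStatement;

public class QueryLogPoller {
  static Thread startPolling(final HiveStatement stmt) {
    Thread poller = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          // Keep draining incremental logs until execution finishes; a final
          // getQueryLog() call after join() would pick up any remainder.
          while (stmt.hasMoreLogs()) {
            for (String line : stmt.getQueryLog()) {
              System.err.println(line);
            }
            Thread.sleep(500);
          }
        } catch (Exception e) {
          // getQueryLog throws ClosedOrCancelledStatementException once the
          // statement is closed or cancelled; treat it as end-of-logs.
        }
      }
    });
    poller.setDaemon(true);
    poller.start();
    return poller;
  }
}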
diff --git a/metastore/scripts/upgrade/mssql/003-HIVE-8239.mssql.sql b/metastore/scripts/upgrade/mssql/003-HIVE-8239.mssql.sql new file mode 100644 index 0000000..0f850e2 --- /dev/null +++ b/metastore/scripts/upgrade/mssql/003-HIVE-8239.mssql.sql @@ -0,0 +1,103 @@ +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. + +-- Drop the primary key constraint on table COMPACTION_QUEUE +DECLARE @sqlcmd NVARCHAR(MAX) + +SELECT @sqlcmd = 'ALTER TABLE COMPACTION_QUEUE DROP CONSTRAINT ' + name + ';' + FROM sys.key_constraints + WHERE [type] = 'PK' + AND [parent_object_id] = OBJECT_ID('COMPACTION_QUEUE') + +EXECUTE (@sqlcmd); + +ALTER TABLE COMPACTION_QUEUE ALTER COLUMN CQ_ID bigint NOT NULL; + +-- Restore the primary key constraint on table COMPACTION_QUEUE +ALTER TABLE COMPACTION_QUEUE ADD CONSTRAINT PK_COMPACTION_CQID PRIMARY KEY CLUSTERED (CQ_ID ASC); + +ALTER TABLE COMPACTION_QUEUE ALTER COLUMN CQ_START bigint NULL; + + +ALTER TABLE COMPLETED_TXN_COMPONENTS ALTER COLUMN CTC_TXNID bigint NULL; + + +-- Drop the primary key constraint on table HIVE_LOCKS +DECLARE @sqlcmd NVARCHAR(MAX) + +SELECT @sqlcmd = 'ALTER TABLE HIVE_LOCKS DROP CONSTRAINT ' + name + ';' + FROM sys.key_constraints + WHERE [type] = 'PK' + AND [parent_object_id] = OBJECT_ID('HIVE_LOCKS') + +EXECUTE (@sqlcmd); + +ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_LOCK_EXT_ID bigint NOT NULL; + +ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_LOCK_INT_ID bigint NOT NULL; + +-- Restore the composite primary key constraint on table HIVE_LOCKS +ALTER TABLE HIVE_LOCKS ADD CONSTRAINT PK_HL_LOCKEXTID_LOCKINTID + PRIMARY KEY CLUSTERED (HL_LOCK_EXT_ID ASC, HL_LOCK_INT_ID ASC); + +ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_TXNID bigint NULL; + +ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_LAST_HEARTBEAT bigint NOT NULL; + +ALTER TABLE HIVE_LOCKS ALTER COLUMN HL_ACQUIRED_AT bigint NULL; + + +ALTER TABLE NEXT_COMPACTION_QUEUE_ID ALTER COLUMN NCQ_NEXT bigint NOT NULL; + + +ALTER TABLE NEXT_LOCK_ID ALTER COLUMN NL_NEXT bigint NOT NULL; + + +ALTER TABLE NEXT_TXN_ID ALTER COLUMN NTXN_NEXT bigint NOT NULL; + + +-- Drop the foreign key constraint on table 
TXN_COMPONENTS, this is required +-- before we drop the primary key constraint on table TXNS +DECLARE @sqlcmd NVARCHAR(MAX) + +SELECT @sqlcmd = 'ALTER TABLE TXN_COMPONENTS DROP CONSTRAINT ' + name + ';' + FROM sys.foreign_keys + WHERE [parent_object_id] = OBJECT_ID('TXN_COMPONENTS') + +EXECUTE (@sqlcmd); + +-- Drop the primary key constraint on table TXNS +DECLARE @sqlcmd NVARCHAR(MAX) + +SELECT @sqlcmd = 'ALTER TABLE TXNS DROP CONSTRAINT ' + name + ';' + FROM sys.key_constraints + WHERE [type] = 'PK' + AND [parent_object_id] = OBJECT_ID('TXNS') + +EXECUTE (@sqlcmd); + +ALTER TABLE TXNS ALTER COLUMN TXN_ID bigint NOT NULL; + +-- Restore the primary key constraint on table TXNS +ALTER TABLE TXNS ADD CONSTRAINT PK_TXNS_TXNID PRIMARY KEY CLUSTERED (TXN_ID ASC); + +ALTER TABLE TXNS ALTER COLUMN TXN_STARTED bigint NOT NULL; + +ALTER TABLE TXNS ALTER COLUMN TXN_LAST_HEARTBEAT bigint NOT NULL; + +ALTER TABLE TXN_COMPONENTS ALTER COLUMN TC_TXNID bigint NULL; + +-- Restore the foreign key constraint on table TXN_COMPONENTS +ALTER TABLE TXN_COMPONENTS WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID); diff --git a/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql index a9f7b83..174ed39 100644 --- a/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql +++ b/metastore/scripts/upgrade/mssql/hive-schema-0.14.0.mssql.sql @@ -836,14 +836,14 @@ CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID); -- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file. -- ----------------------------------------------------------------------------------------------------------------------------------------------- CREATE TABLE COMPACTION_QUEUE( - CQ_ID int NOT NULL, + CQ_ID bigint NOT NULL, CQ_DATABASE varchar(128) NOT NULL, CQ_TABLE varchar(128) NOT NULL, CQ_PARTITION varchar(767) NULL, CQ_STATE char(1) NOT NULL, CQ_TYPE char(1) NOT NULL, CQ_WORKER_ID varchar(128) NULL, - CQ_START int NULL, + CQ_START bigint NULL, CQ_RUN_AS varchar(128) NULL, PRIMARY KEY CLUSTERED ( @@ -852,23 +852,23 @@ PRIMARY KEY CLUSTERED ); CREATE TABLE COMPLETED_TXN_COMPONENTS( - CTC_TXNID int NULL, + CTC_TXNID bigint NULL, CTC_DATABASE varchar(128) NOT NULL, CTC_TABLE varchar(128) NULL, CTC_PARTITION varchar(767) NULL ); CREATE TABLE HIVE_LOCKS( - HL_LOCK_EXT_ID int NOT NULL, - HL_LOCK_INT_ID int NOT NULL, - HL_TXNID int NULL, + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint NULL, HL_DB varchar(128) NOT NULL, HL_TABLE varchar(128) NULL, HL_PARTITION varchar(767) NULL, HL_LOCK_STATE char(1) NOT NULL, HL_LOCK_TYPE char(1) NOT NULL, - HL_LAST_HEARTBEAT int NOT NULL, - HL_ACQUIRED_AT int NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint NULL, HL_USER varchar(128) NOT NULL, HL_HOST varchar(128) NOT NULL, PRIMARY KEY CLUSTERED @@ -879,28 +879,28 @@ PRIMARY KEY CLUSTERED ); CREATE TABLE NEXT_COMPACTION_QUEUE_ID( - NCQ_NEXT int NOT NULL + NCQ_NEXT bigint NOT NULL ); INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); CREATE TABLE NEXT_LOCK_ID( - NL_NEXT int NOT NULL + NL_NEXT bigint NOT NULL ); INSERT INTO NEXT_LOCK_ID VALUES(1); CREATE TABLE NEXT_TXN_ID( - NTXN_NEXT int NOT NULL + NTXN_NEXT bigint NOT NULL ); INSERT INTO NEXT_TXN_ID VALUES(1); CREATE TABLE TXNS( - TXN_ID int NOT NULL, + TXN_ID bigint NOT NULL, TXN_STATE char(1) NOT NULL, - TXN_STARTED int NOT NULL, - TXN_LAST_HEARTBEAT int NOT NULL, + TXN_STARTED bigint NOT 
NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, TXN_USER varchar(128) NOT NULL, TXN_HOST varchar(128) NOT NULL, PRIMARY KEY CLUSTERED @@ -910,7 +910,7 @@ PRIMARY KEY CLUSTERED ); CREATE TABLE TXN_COMPONENTS( - TC_TXNID int NULL, + TC_TXNID bigint NULL, TC_DATABASE varchar(128) NOT NULL, TC_TABLE varchar(128) NULL, TC_PARTITION varchar(767) NULL diff --git a/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql index 2b5f3b8..1bda6d5 100644 --- a/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql +++ b/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql @@ -1,6 +1,7 @@ SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS MESSAGE; :r 002-HIVE-7784.mssql.sql; +:r 003-HIVE-8239.mssql.sql; UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0' AS MESSAGE; diff --git a/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql index 9b09555..126f7cf 100644 --- a/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql +++ b/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql @@ -1,5 +1,6 @@ SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS Status from dual; +@019-HIVE-7118.oracle.sql; @020-HIVE-7784.oracle.sql; UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 33745e4..47eca29 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -48,9 +48,6 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.regex.Pattern; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableListMultimap; -import com.google.common.collect.Multimaps; import org.apache.commons.cli.OptionBuilder; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -171,6 +168,8 @@ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent; import org.apache.hadoop.hive.metastore.events.PreEventContext; import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent; +import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.PreReadTableEvent; import org.apache.hadoop.hive.metastore.model.MDBPrivilege; import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; @@ -203,7 +202,10 @@ import com.facebook.fb303.FacebookBase; import com.facebook.fb303.fb_status; import com.google.common.base.Splitter; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableListMultimap; import com.google.common.collect.Lists; +import com.google.common.collect.Multimaps; /** * TODO:pc remove application logic to a separate interface. 
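The PreReadDatabaseEvent/PreReadTableEvent imports added above feed the read-path changes in the hunks that follow: the metastore now fires a pre-event before serving reads, so a registered listener can audit or veto them. A sketch of such a listener (the getTable accessor is an assumption based on how the events are constructed below):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.events.PreEventContext;
import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;

public class ReadLoggingListener extends MetaStorePreEventListener {
  public ReadLoggingListener(Configuration conf) {
    super(conf);
  }

  @Override
  public void onEvent(PreEventContext context)
      throws MetaException, NoSuchObjectException, InvalidOperationException {
    // Fired by get_table and the get_partition* calls patched below; throwing
    // here (e.g. from an authorization check) vetoes the read.
    if (context instanceof PreReadTableEvent) {
      String table = ((PreReadTableEvent) context).getTable().getTableName();
      System.out.println("metastore read of table " + table);
    }
  }
}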
@@ -803,7 +805,7 @@ public void create_database(final Database db) Exception ex = null; try { try { - if (null != get_database(db.getName())) { + if (null != get_database_core(db.getName())) { throw new AlreadyExistsException("Database " + db.getName() + " already exists"); } } catch (NoSuchObjectException e) { @@ -829,25 +831,45 @@ } @Override - public Database get_database(final String name) throws NoSuchObjectException, - MetaException { + public Database get_database(final String name) throws NoSuchObjectException, MetaException { startFunction("get_database", ": " + name); Database db = null; Exception ex = null; try { - db = getMS().getDatabase(name); + db = get_database_core(name); + firePreEvent(new PreReadDatabaseEvent(db, this)); } catch (MetaException e) { ex = e; throw e; } catch (NoSuchObjectException e) { ex = e; throw e; + } finally { + endFunction("get_database", db != null, ex); + } + return db; + } + + /** + * Equivalent to get_database, but does not write to audit logs or fire pre-event listeners. + * Meant to be used for internal Hive classes that don't use the Thrift interface. + * @param name + * @return + * @throws NoSuchObjectException + * @throws MetaException + */ + public Database get_database_core(final String name) throws NoSuchObjectException, + MetaException { + Database db = null; + try { + db = getMS().getDatabase(name); + } catch (MetaException e) { + throw e; + } catch (NoSuchObjectException e) { + throw e; } catch (Exception e) { - ex = e; assert (e instanceof RuntimeException); throw (RuntimeException) e; - } finally { - endFunction("get_database", db != null, ex); } return db; } @@ -1373,7 +1395,7 @@ private void drop_table_core(final RawStore ms, final String dbname, final Strin try { ms.openTransaction(); // drop any partitions - tbl = get_table(dbname, name); + tbl = get_table_core(dbname, name); if (tbl == null) { throw new NoSuchObjectException(name + " doesn't exist"); } @@ -1424,10 +1446,14 @@ private void drop_table_core(final RawStore ms, final String dbname, final Strin if (!success) { ms.rollbackTransaction(); } else if (deleteData && !isExternal) { + boolean ifPurge = false; + if (envContext != null){ + ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge")); + } // Delete the data in the partitions which have other locations - deletePartitionData(partPaths); + deletePartitionData(partPaths, ifPurge); // Delete the data in the table - deleteTableData(tblPath); + deleteTableData(tblPath, ifPurge); // ok even if the data is not deleted } for (MetaStoreEventListener listener : listeners) { @@ -1444,9 +1470,21 @@ private void drop_table_core(final RawStore ms, final String dbname, final Strin * @param tablePath */ private void deleteTableData(Path tablePath) { + deleteTableData(tablePath, false); + } + + /** + * Deletes the data in a table's location; if deletion fails, logs an error + * + * @param tablePath + * @param ifPurge completely purge the table (skipping trash) while removing + * data from warehouse + */ + private void deleteTableData(Path tablePath, boolean ifPurge) { + if (tablePath != null) { try { - wh.deleteDir(tablePath, true); + wh.deleteDir(tablePath, true, ifPurge); } catch (Exception e) { LOG.error("Failed to delete table directory: " + tablePath + " " + e.getMessage()); @@ -1461,10 +1499,22 @@ private void deleteTableData(Path tablePath) { * @param partPaths */ private void deletePartitionData(List<Path> partPaths) { + deletePartitionData(partPaths, false); + } + + /** + * 
Given a list of partitions' locations, tries to delete each one, + * logging an error for each that fails. + * + * @param partPaths + * @param ifPurge completely purge the partition (skipping trash) while + * removing data from warehouse + */ + private void deletePartitionData(List<Path> partPaths, boolean ifPurge) { if (partPaths != null && !partPaths.isEmpty()) { for (Path partPath : partPaths) { try { - wh.deleteDir(partPath, true); + wh.deleteDir(partPath, true, ifPurge); } catch (Exception e) { LOG.error("Failed to delete partition directory: " + partPath + " " + e.getMessage()); @@ -1597,13 +1647,40 @@ public Table get_table(final String dbname, final String name) throws MetaExcept startTableFunction("get_table", dbname, name); Exception ex = null; try { + t = get_table_core(dbname, name); + firePreEvent(new PreReadTableEvent(t, this)); + } catch (MetaException e) { + ex = e; + throw e; + } catch (NoSuchObjectException e) { + ex = e; + throw e; + } finally { + endFunction("get_table", t != null, ex, name); + } + return t; + } + + /** + * Equivalent of get_table, but does not write to audit logs or fire the pre-event listener. + * Meant to be used for calls made by other Hive classes that do not use the + * Thrift interface. + * @param dbname + * @param name + * @return Table object + * @throws MetaException + * @throws NoSuchObjectException + */ + public Table get_table_core(final String dbname, final String name) throws MetaException, + NoSuchObjectException { + Table t; + try { t = getMS().getTable(dbname, name); if (t == null) { throw new NoSuchObjectException(dbname + "." + name + " table not found"); } } catch (Exception e) { - ex = e; if (e instanceof MetaException) { throw (MetaException) e; } else if (e instanceof NoSuchObjectException) { @@ -1611,8 +1688,6 @@ public Table get_table(final String dbname, final String name) throws MetaExcept } else { throw newMetaException(e); } - } finally { - endFunction("get_table", t != null, ex, name); } return t; } @@ -2390,7 +2465,7 @@ private boolean drop_partition_common(RawStore ms, String db_name, String tbl_na try { ms.openTransaction(); part = ms.getPartition(db_name, tbl_name, part_vals); - tbl = get_table(db_name, tbl_name); + tbl = get_table_core(db_name, tbl_name); firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this)); if (part == null) { @@ -2484,7 +2559,7 @@ public DropPartitionsResult drop_partitions_req( try { // We need Partition-s for firing events and for result; DN needs MPartition-s to drop. // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes. 
- tbl = get_table(dbName, tblName); + tbl = get_table_core(dbName, tblName); int minCount = 0; RequestPartsSpec spec = request.getParts(); List partNames = null; @@ -2643,6 +2718,7 @@ public Partition get_partition(final String db_name, final String tbl_name, Partition ret = null; Exception ex = null; try { + fireReadTablePreEvent(db_name, tbl_name); ret = getMS().getPartition(db_name, tbl_name, part_vals); } catch (Exception e) { ex = e; @@ -2659,6 +2735,28 @@ public Partition get_partition(final String db_name, final String tbl_name, return ret; } + /** + * Fire a pre-event for read table operation, if there are any + * pre-event listeners registered + * + * @param db_name + * @param tbl_name + * @throws MetaException + * @throws NoSuchObjectException + */ + private void fireReadTablePreEvent(String dbName, String tblName) throws MetaException, NoSuchObjectException { + if(preListeners.size() > 0) { + // do this only if there is a pre event listener registered (avoid unnecessary + // metastore api call) + Table t = getMS().getTable(dbName, tblName); + if (t == null) { + throw new NoSuchObjectException(dbName + "." + tblName + + " table not found"); + } + firePreEvent(new PreReadTableEvent(t, this)); + } + } + @Override public Partition get_partition_with_auth(final String db_name, final String tbl_name, final List part_vals, @@ -2666,7 +2764,7 @@ public Partition get_partition_with_auth(final String db_name, throws MetaException, NoSuchObjectException, TException { startPartitionFunction("get_partition_with_auth", db_name, tbl_name, part_vals); - + fireReadTablePreEvent(db_name, tbl_name); Partition ret = null; Exception ex = null; try { @@ -2688,7 +2786,7 @@ public Partition get_partition_with_auth(final String db_name, public List get_partitions(final String db_name, final String tbl_name, final short max_parts) throws NoSuchObjectException, MetaException { startTableFunction("get_partitions", db_name, tbl_name); - + fireReadTablePreEvent(db_name, tbl_name); List ret = null; Exception ex = null; try { @@ -2745,7 +2843,7 @@ public Partition get_partition_with_auth(final String db_name, List partitionSpecs = null; try { - Table table = get_table(dbName, tableName); + Table table = get_table_core(dbName, tableName); List partitions = get_partitions(dbName, tableName, (short) max_parts); if (is_partition_spec_grouping_enabled(table)) { @@ -2769,7 +2867,7 @@ public Partition get_partition_with_auth(final String db_name, private static class StorageDescriptorKey { - private StorageDescriptor sd; + private final StorageDescriptor sd; StorageDescriptorKey(StorageDescriptor sd) { this.sd = sd; } @@ -2891,9 +2989,9 @@ private static boolean is_partition_spec_grouping_enabled(Table table) { @Override public List get_partition_names(final String db_name, final String tbl_name, - final short max_parts) throws MetaException { + final short max_parts) throws MetaException, NoSuchObjectException { startTableFunction("get_partition_names", db_name, tbl_name); - + fireReadTablePreEvent(db_name, tbl_name); List ret = null; Exception ex = null; try { @@ -3010,14 +3108,7 @@ public void alter_partitions(final String db_name, final String tbl_name, Exception ex = null; try { for (Partition tmpPart : new_parts) { - try { - for (MetaStorePreEventListener listener : preListeners) { - listener.onEvent( - new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this)); - } - } catch (NoSuchObjectException e) { - throw new MetaException(e.getMessage()); - } + firePreEvent(new PreAlterPartitionEvent(db_name, 
tbl_name, null, tmpPart, this)); } oldParts = alterHandler.alterPartitions(getMS(), wh, db_name, tbl_name, new_parts); @@ -3122,7 +3213,7 @@ public void alter_table_with_environment_context(final String dbname, boolean success = false; Exception ex = null; try { - Table oldt = get_table(dbname, name); + Table oldt = get_table_core(dbname, name); firePreEvent(new PreAlterTableEvent(oldt, newTable, this)); alterHandler.alterTable(getMS(), wh, dbname, name, newTable); success = true; @@ -3206,7 +3297,7 @@ public void alter_table_with_environment_context(final String dbname, Exception ex = null; try { try { - tbl = get_table(db, base_table_name); + tbl = get_table_core(db, base_table_name); } catch (NoSuchObjectException e) { throw new UnknownTableException(e.getMessage()); } @@ -3266,7 +3357,7 @@ public void alter_table_with_environment_context(final String dbname, Table tbl; try { - tbl = get_table(db, base_table_name); + tbl = get_table_core(db, base_table_name); } catch (NoSuchObjectException e) { throw new UnknownTableException(e.getMessage()); } @@ -3385,6 +3476,7 @@ public String get_config_value(String name, String defaultValue) private Partition get_partition_by_name_core(final RawStore ms, final String db_name, final String tbl_name, final String part_name) throws MetaException, NoSuchObjectException, TException { + fireReadTablePreEvent(db_name, tbl_name); List partVals = null; try { partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); @@ -3406,7 +3498,6 @@ public Partition get_partition_by_name(final String db_name, final String tbl_na startFunction("get_partition_by_name", ": db=" + db_name + " tbl=" + tbl_name + " part=" + part_name); - Partition ret = null; Exception ex = null; try { @@ -3536,6 +3627,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n final List groupNames) throws MetaException, TException, NoSuchObjectException { startPartitionFunction("get_partitions_ps_with_auth", db_name, tbl_name, part_vals); + fireReadTablePreEvent(db_name, tbl_name); List ret = null; Exception ex = null; try { @@ -3558,6 +3650,7 @@ public boolean drop_partition_by_name_with_environment_context(final String db_n final String tbl_name, final List part_vals, final short max_parts) throws MetaException, TException, NoSuchObjectException { startPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals); + fireReadTablePreEvent(db_name, tbl_name); List ret = null; Exception ex = null; try { @@ -3726,7 +3819,7 @@ private boolean drop_index_by_name_core(final RawStore ms, String idxTblName = index.getIndexTableName(); if (idxTblName != null) { String[] qualified = MetaStoreUtils.getQualifiedName(index.getDbName(), idxTblName); - Table tbl = get_table(qualified[0], qualified[1]); + Table tbl = get_table_core(qualified[0], qualified[1]); if (tbl.getSd() == null) { throw new MetaException("Table metadata is corrupted"); } @@ -4028,7 +4121,7 @@ public boolean update_partition_column_statistics(ColumnStatistics colStats) } finally { endFunction("write_partition_column_statistics: ", ret != false, null, tableName); } - } + } @Override public boolean delete_partition_column_statistics(String dbName, String tableName, @@ -4083,7 +4176,7 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S final String tblName, final String filter, final short maxParts) throws MetaException, NoSuchObjectException, TException { startTableFunction("get_partitions_by_filter", dbName, tblName); - + fireReadTablePreEvent(dbName, 
tblName); List ret = null; Exception ex = null; try { @@ -4106,7 +4199,7 @@ public boolean delete_table_column_statistics(String dbName, String tableName, S List partitionSpecs = null; try { - Table table = get_table(dbName, tblName); + Table table = get_table_core(dbName, tblName); List partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts); if (is_partition_spec_grouping_enabled(table)) { @@ -4133,6 +4226,7 @@ public PartitionsByExprResult get_partitions_by_expr( PartitionsByExprRequest req) throws TException { String dbName = req.getDbName(), tblName = req.getTblName(); startTableFunction("get_partitions_by_expr", dbName, tblName); + fireReadTablePreEvent(dbName, tblName); PartitionsByExprResult ret = null; Exception ex = null; try { @@ -4169,7 +4263,7 @@ private void rethrowException(Exception e) throws MetaException, NoSuchObjectException, TException { startTableFunction("get_partitions_by_names", dbName, tblName); - + fireReadTablePreEvent(dbName, tblName); List ret = null; Exception ex = null; try { @@ -4214,7 +4308,7 @@ private String getPartName(HiveObjectRef hiveObject) throws MetaException { List partValue = hiveObject.getPartValues(); if (partValue != null && partValue.size() > 0) { try { - Table table = get_table(hiveObject.getDbName(), hiveObject + Table table = get_table_core(hiveObject.getDbName(), hiveObject .getObjectName()); partName = Warehouse .makePartName(table.getPartitionKeys(), partValue); @@ -4658,7 +4752,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (dbName == null) { return getMS().listPrincipalPartitionColumnGrantsAll(principalName, principalType); } - Table tbl = get_table(dbName, tableName); + Table tbl = get_table_core(dbName, tableName); String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); if (principalName == null) { return getMS().listPartitionColumnGrantsAll(dbName, tableName, partName, columnName); @@ -4736,7 +4830,7 @@ private PrincipalPrivilegeSet get_user_privilege_set(final String userName, if (dbName == null) { return getMS().listPrincipalPartitionGrantsAll(principalName, principalType); } - Table tbl = get_table(dbName, tableName); + Table tbl = get_table_core(dbName, tableName); String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); if (principalName == null) { return getMS().listPartitionGrantsAll(dbName, tableName, partName); @@ -5394,7 +5488,7 @@ public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) } } - + public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf) throws MetaException { return newHMSHandler(name, hiveConf, false); } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 8765d53..c25f13e 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -28,7 +28,6 @@ import java.lang.reflect.Proxy; import java.net.InetAddress; import java.net.URI; -import java.net.URISyntaxException; import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -98,7 +97,6 @@ import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionSpec; import 
org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest; import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult; import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest; @@ -122,7 +120,6 @@ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.metastore.api.UnlockRequest; -import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.txn.TxnHandler; import org.apache.hadoop.hive.shims.HadoopShims; @@ -763,18 +760,35 @@ public boolean dropPartition(String db_name, String tbl_name, List part_ } /** - * @param name - * @param dbname - * @throws NoSuchObjectException - * @throws MetaException - * @throws TException - * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String, - * java.lang.String, boolean) + * {@inheritDoc} + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) */ @Override - public void dropTable(String dbname, String name) - throws NoSuchObjectException, MetaException, TException { - dropTable(dbname, name, true, true, null); + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + dropTable(dbname, name, deleteData, ignoreUnknownTab, null); + } + + /** + * Drop the table and choose whether to save the data in the trash. + * @param ifPurge completely purge the table (skipping trash) while removing + * data from warehouse + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab, boolean ifPurge) + throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { + // build a new EnvironmentContext carrying the ifPurge flag + EnvironmentContext envContext = null; + if (ifPurge) { + Map warehouseOptions = new HashMap(); + warehouseOptions.put("ifPurge", "TRUE"); + envContext = new EnvironmentContext(warehouseOptions); + } + dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext); } /** {@inheritDoc} */ @@ -786,23 +800,37 @@ public void dropTable(String tableName, boolean deleteData) } /** + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name) + throws NoSuchObjectException, MetaException, TException { + dropTable(dbname, name, true, true, null); + } + + /** + * Drop the table and choose whether to: delete the underlying table data; + * throw if the table doesn't exist; save the data in the trash.
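For illustration only, not part of the patch: the new five-argument overload above is the public face of the purge path. Setting ifPurge plants "ifPurge" -> "TRUE" in the EnvironmentContext so that the filesystem handler's deleteDir() (see the HiveMetaStoreFsImpl hunk below) can bypass the trash. A minimal usage sketch; only the dropTable signatures come from this patch, while the conf setup and table name are invented:

```java
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());

    // Existing behavior: deleted data is moved to the trash
    // (subject to fs.trash.interval).
    client.dropTable("default", "tmp_events", true, true);

    // New in this patch: ifPurge=true deletes the data outright,
    // bypassing moveToAppropriateTrash().
    client.dropTable("default", "tmp_events", true, true, true);
    client.close();
  }
}
```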
+ * * @param dbname * @param name * @param deleteData * delete the underlying data or just delete the table in metadata - * @throws NoSuchObjectException + * @param ignoreUnknownTab + * don't throw if the requested table doesn't exist + * @param envContext + * for communicating with thrift * @throws MetaException + * could not drop table properly + * @throws NoSuchObjectException + * the table wasn't found * @throws TException + * a thrift communication error occurred + * @throws UnsupportedOperationException + * dropping an index table is not allowed * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String, * java.lang.String, boolean) */ - @Override - public void dropTable(String dbname, String name, boolean deleteData, - boolean ignoreUnknownTab) throws MetaException, TException, - NoSuchObjectException, UnsupportedOperationException { - dropTable(dbname, name, deleteData, ignoreUnknownTab, null); - } - public void dropTable(String dbname, String name, boolean deleteData, boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { @@ -1283,6 +1311,7 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) } /** {@inheritDoc} */ + @Override public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException{ @@ -1659,7 +1688,12 @@ public void cancelDelegationToken(String tokenStrForm) throws MetaException, TEx @Override public ValidTxnList getValidTxns() throws TException { - return TxnHandler.createValidTxnList(client.get_open_txns()); + return TxnHandler.createValidTxnList(client.get_open_txns(), 0); + } + + @Override + public ValidTxnList getValidTxns(long currentTxn) throws TException { + return TxnHandler.createValidTxnList(client.get_open_txns(), currentTxn); } @Override diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java index cff0718..c3598db 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java @@ -37,12 +37,14 @@ @Override public boolean deleteDir(FileSystem fs, Path f, boolean recursive, - Configuration conf) throws MetaException { + boolean ifPurge, Configuration conf) throws MetaException { LOG.info("deleting " + f); HadoopShims hadoopShim = ShimLoader.getHadoopShims(); try { - if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) { + if (ifPurge) { + LOG.info("Not moving "+ f +" to trash"); + } else if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) { LOG.info("Moved to trash: " + f); return true; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index e86a90a..066ab68 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -192,6 +192,10 @@ * The database for this table * @param tableName * The table to drop + * @param deleteData + * Should we delete the underlying data + * @param ignoreUnknownTab + * don't throw if the requested table doesn't exist * @throws MetaException * Could not drop table properly. 
* @throws NoSuchObjectException @@ -200,7 +204,16 @@ * A thrift communication error occurred */ void dropTable(String dbname, String tableName, boolean deleteData, - boolean ignoreUknownTab) throws MetaException, TException, + boolean ignoreUnknownTab) throws MetaException, TException, + NoSuchObjectException; + + /** + * @param ifPurge + * completely purge the table (skipping trash) while removing data from warehouse + * @see #dropTable(String, String, boolean, boolean) + */ + public void dropTable(String dbname, String tableName, boolean deleteData, + boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException, NoSuchObjectException; /** @@ -226,6 +239,9 @@ void dropTable(String dbname, String tableName, boolean deleteData, void dropTable(String tableName, boolean deleteData) throws MetaException, UnknownTableException, TException, NoSuchObjectException; + /** + * @see #dropTable(String, String, boolean, boolean) + */ void dropTable(String dbname, String tableName) throws MetaException, TException, NoSuchObjectException; @@ -1070,6 +1086,15 @@ Function getFunction(String dbName, String funcName) ValidTxnList getValidTxns() throws TException; /** + * Get a structure that details valid transactions. + * @param currentTxn The current transaction of the caller. This will be removed from the + * exceptions list so that the caller sees records from his own transaction. + * @return list of valid transactions + * @throws TException + */ + ValidTxnList getValidTxns(long currentTxn) throws TException; + + /** * Initiate a transaction. * @param user User who is opening this transaction. This is the Hive user, * not necessarily the OS user. It is assumed that this user has already been diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java index a141793..4f525a4 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java @@ -32,11 +32,12 @@ * delete a directory * * @param f + * @param ifPurge * @param recursive * @return true on success * @throws MetaException */ public boolean deleteDir(FileSystem fs, Path f, boolean recursive, - Configuration conf) throws MetaException; + boolean ifPurge, Configuration conf) throws MetaException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index c95473c..25c180d 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -258,7 +258,7 @@ public static boolean requireCalStats(Configuration hiveConf, Partition oldPart, if (oldPart.getParameters().containsKey(stat)) { Long oldStat = Long.parseLong(oldPart.getParameters().get(stat)); Long newStat = Long.parseLong(newPart.getParameters().get(stat)); - if (oldStat != newStat) { + if (!oldStat.equals(newStat)) { return true; } } @@ -993,7 +993,7 @@ public static Properties getSchema( partString = partString.concat(partStringSep); partString = partString.concat(partKey.getName()); partTypesString = partTypesString.concat(partTypesStringSep); - partTypesString = partTypesString.concat(partKey.getType()); + partTypesString = partTypesString.concat(partKey.getType()); if (partStringSep.length() == 0) { partStringSep = "/"; partTypesStringSep = ":"; @@ -1007,7 +1007,7 @@ public static Properties 
getSchema( schema .setProperty( org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES, - partTypesString); + partTypesString); } if (parameters != null) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java index cfab73a..4a56bfa 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java @@ -57,6 +57,6 @@ protected void setIpAddress(final TProtocol in) { } protected void setIpAddress(final Socket inSocket) { - HMSHandler.setIpAddress(inSocket.getInetAddress().toString()); + HMSHandler.setIpAddress(inSocket.getInetAddress().getHostAddress()); } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java index a32507d..c99ce5f 100755 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java @@ -224,8 +224,12 @@ public boolean renameDir(Path sourcePath, Path destPath, boolean inheritPerms) t } public boolean deleteDir(Path f, boolean recursive) throws MetaException { + return deleteDir(f, recursive, false); + } + + public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) throws MetaException { FileSystem fs = getFs(f); - return fsHandler.deleteDir(fs, f, recursive, conf); + return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf); } public boolean isEmpty(Path path) throws IOException, MetaException { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java index 4499485..dbc3247 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java @@ -38,6 +38,8 @@ DROP_DATABASE, LOAD_PARTITION_DONE, AUTHORIZATION_API_CALL, + READ_TABLE, + READ_DATABASE } private final PreEventType eventType; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreReadDatabaseEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreReadDatabaseEvent.java new file mode 100644 index 0000000..d415620 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreReadDatabaseEvent.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; +import org.apache.hadoop.hive.metastore.api.Database; + +/** + * Database read event + */ +public class PreReadDatabaseEvent extends PreEventContext { + private final Database db; + + public PreReadDatabaseEvent(Database db, HMSHandler handler) { + super(PreEventType.READ_DATABASE, handler); + this.db = db; + } + + /** + * @return the db + */ + public Database getDatabase() { + return db; + } + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreReadTableEvent.java b/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreReadTableEvent.java new file mode 100644 index 0000000..b93da67 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreReadTableEvent.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.events; + +import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler; +import org.apache.hadoop.hive.metastore.api.Table; + +/** + * Table read event + */ +public class PreReadTableEvent extends PreEventContext { + + private final Table table; + + public PreReadTableEvent(Table table, HMSHandler handler) { + super(PreEventType.READ_TABLE, handler); + this.table = table; + } + + /** + * @return the table + */ + public Table getTable() { + return table; + } + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index b71bb41..6f44169 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -233,12 +233,22 @@ public GetOpenTxnsResponse getOpenTxns() throws MetaException { } } - public static ValidTxnList createValidTxnList(GetOpenTxnsResponse txns) { + /** + * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse} to a + * {@link org.apache.hadoop.hive.common.ValidTxnList}. + * @param txns txn list from the metastore + * @param currentTxn Current transaction that the user has open. If this is greater than 0 it + * will be removed from the exceptions list so that the user sees his own + * transaction as valid. + * @return a valid txn list. + */ + public static ValidTxnList createValidTxnList(GetOpenTxnsResponse txns, long currentTxn) { long highWater = txns.getTxn_high_water_mark(); Set open = txns.getOpen_txns(); - long[] exceptions = new long[open.size()]; + long[] exceptions = new long[open.size() - (currentTxn > 0 ? 
1 : 0)]; int i = 0; for(long txn: open) { + if (currentTxn > 0 && currentTxn == txn) continue; exceptions[i++] = txn; } return new ValidTxnListImpl(exceptions, highWater); diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java b/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java index ed0f713..d8ec281 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/IpAddressListener.java @@ -47,15 +47,10 @@ public IpAddressListener(Configuration config) { super(config); } - private String getIpFromInetAddress(String addr) { - return addr.substring(addr.indexOf('/') + 1); - } - private void checkIpAddress() { try { - String localhostIp = InetAddress.getByName(LOCAL_HOST).toString(); - Assert.assertEquals(getIpFromInetAddress(localhostIp), - getIpFromInetAddress(HMSHandler.getIpAddress())); + String localhostIp = InetAddress.getByName(LOCAL_HOST).getHostAddress(); + Assert.assertEquals(localhostIp, HMSHandler.getIpAddress()); } catch (UnknownHostException e) { Assert.assertTrue("InetAddress.getLocalHost threw an exception: " + e.getMessage(), false); } diff --git a/pom.xml b/pom.xml index c6f2382..2d24058 100644 --- a/pom.xml +++ b/pom.xml @@ -115,7 +115,7 @@ 2.1.6 0.20.2 1.2.1 - 2.4.0 + 2.5.0 ${basedir}/${hive.path.to.root}/testutils/hadoop 0.98.3-hadoop1 0.98.3-hadoop2 @@ -151,7 +151,7 @@ 1.0.1 1.7.5 4.0.4 - 0.5.0 + 0.5.1 2.2.0 1.2.0-SNAPSHOT 2.10 @@ -982,6 +982,11 @@ org.apache.hadoop + hadoop-client + ${hadoop-20S.version} + + + org.apache.hadoop hadoop-core ${hadoop-20S.version} @@ -1024,6 +1029,11 @@ org.apache.hadoop + hadoop-client + ${hadoop-23.version} + + + org.apache.hadoop hadoop-common ${hadoop-23.version} diff --git a/ql/if/queryplan.thrift b/ql/if/queryplan.thrift index f2a405e..c8dfa35 100644 --- a/ql/if/queryplan.thrift +++ b/ql/if/queryplan.thrift @@ -59,6 +59,7 @@ enum OperatorType { EVENT, ORCFILEMERGE, RCFILEMERGE, + MERGEJOIN, } struct Operator { diff --git a/ql/pom.xml b/ql/pom.xml index 971282a..c796c95 100644 --- a/ql/pom.xml +++ b/ql/pom.xml @@ -28,6 +28,7 @@ Hive Query Language + 0.9.1-incubating-SNAPSHOT .. 
@@ -182,6 +183,42 @@ ${datanucleus-core.version} + org.apache.optiq + optiq-core + ${optiq.version} + + + + org.hsqldb + hsqldb + + + com.fasterxml.jackson.core + jackson-databind + + + + + org.apache.optiq + optiq-avatica + ${optiq.version} + + + + org.hsqldb + hsqldb + + + com.fasterxml.jackson.core + jackson-databind + + + + com.google.guava guava ${guava.version} diff --git a/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp b/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp index 04a0d67..19d4806 100644 --- a/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp +++ b/ql/src/gen/thrift/gen-cpp/queryplan_types.cpp @@ -54,7 +54,8 @@ int _kOperatorTypeValues[] = { OperatorType::DEMUX, OperatorType::EVENT, OperatorType::ORCFILEMERGE, - OperatorType::RCFILEMERGE + OperatorType::RCFILEMERGE, + OperatorType::MERGEJOIN }; const char* _kOperatorTypeNames[] = { "JOIN", @@ -80,9 +81,10 @@ const char* _kOperatorTypeNames[] = { "DEMUX", "EVENT", "ORCFILEMERGE", - "RCFILEMERGE" + "RCFILEMERGE", + "MERGEJOIN" }; -const std::map _OperatorType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(24, _kOperatorTypeValues, _kOperatorTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL)); +const std::map _OperatorType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(25, _kOperatorTypeValues, _kOperatorTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL)); int _kTaskTypeValues[] = { TaskType::MAP, diff --git a/ql/src/gen/thrift/gen-cpp/queryplan_types.h b/ql/src/gen/thrift/gen-cpp/queryplan_types.h index d7797c6..ac73bc5 100644 --- a/ql/src/gen/thrift/gen-cpp/queryplan_types.h +++ b/ql/src/gen/thrift/gen-cpp/queryplan_types.h @@ -59,7 +59,8 @@ struct OperatorType { DEMUX = 20, EVENT = 21, ORCFILEMERGE = 22, - RCFILEMERGE = 23 + RCFILEMERGE = 23, + MERGEJOIN = 24 }; }; diff --git a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java index e5c4c44..e18f935 100644 --- a/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java +++ b/ql/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/ql/plan/api/OperatorType.java @@ -35,7 +35,8 @@ DEMUX(20), EVENT(21), ORCFILEMERGE(22), - RCFILEMERGE(23); + RCFILEMERGE(23), + MERGEJOIN(24); private final int value; @@ -104,6 +105,8 @@ public static OperatorType findByValue(int value) { return ORCFILEMERGE; case 23: return RCFILEMERGE; + case 24: + return MERGEJOIN; default: return null; } diff --git a/ql/src/gen/thrift/gen-php/Types.php b/ql/src/gen/thrift/gen-php/Types.php index e6f87d3..7121ed4 100644 --- a/ql/src/gen/thrift/gen-php/Types.php +++ b/ql/src/gen/thrift/gen-php/Types.php @@ -59,6 +59,7 @@ final class OperatorType { const EVENT = 21; const ORCFILEMERGE = 22; const RCFILEMERGE = 23; + const MERGEJOIN = 24; static public $__names = array( 0 => 'JOIN', 1 => 'MAPJOIN', @@ -84,6 +85,7 @@ final class OperatorType { 21 => 'EVENT', 22 => 'ORCFILEMERGE', 23 => 'RCFILEMERGE', + 24 => 'MERGEJOIN', ); } diff --git a/ql/src/gen/thrift/gen-py/queryplan/ttypes.py b/ql/src/gen/thrift/gen-py/queryplan/ttypes.py index 2e26e58..53c0106 100644 --- a/ql/src/gen/thrift/gen-py/queryplan/ttypes.py +++ b/ql/src/gen/thrift/gen-py/queryplan/ttypes.py @@ -69,6 +69,7 @@ class OperatorType: EVENT = 21 ORCFILEMERGE = 22 RCFILEMERGE = 23 + MERGEJOIN = 24 _VALUES_TO_NAMES = { 0: "JOIN", @@ -95,6 +96,7 @@ class OperatorType: 21: "EVENT", 22: "ORCFILEMERGE", 23: "RCFILEMERGE", + 24: "MERGEJOIN", } _NAMES_TO_VALUES = { @@ -122,6 +124,7 @@ class OperatorType: 
"EVENT": 21, "ORCFILEMERGE": 22, "RCFILEMERGE": 23, + "MERGEJOIN": 24, } class TaskType: diff --git a/ql/src/gen/thrift/gen-rb/queryplan_types.rb b/ql/src/gen/thrift/gen-rb/queryplan_types.rb index e5e98ae..c2c4220 100644 --- a/ql/src/gen/thrift/gen-rb/queryplan_types.rb +++ b/ql/src/gen/thrift/gen-rb/queryplan_types.rb @@ -45,8 +45,9 @@ module OperatorType EVENT = 21 ORCFILEMERGE = 22 RCFILEMERGE = 23 - VALUE_MAP = {0 => "JOIN", 1 => "MAPJOIN", 2 => "EXTRACT", 3 => "FILTER", 4 => "FORWARD", 5 => "GROUPBY", 6 => "LIMIT", 7 => "SCRIPT", 8 => "SELECT", 9 => "TABLESCAN", 10 => "FILESINK", 11 => "REDUCESINK", 12 => "UNION", 13 => "UDTF", 14 => "LATERALVIEWJOIN", 15 => "LATERALVIEWFORWARD", 16 => "HASHTABLESINK", 17 => "HASHTABLEDUMMY", 18 => "PTF", 19 => "MUX", 20 => "DEMUX", 21 => "EVENT", 22 => "ORCFILEMERGE", 23 => "RCFILEMERGE"} - VALID_VALUES = Set.new([JOIN, MAPJOIN, EXTRACT, FILTER, FORWARD, GROUPBY, LIMIT, SCRIPT, SELECT, TABLESCAN, FILESINK, REDUCESINK, UNION, UDTF, LATERALVIEWJOIN, LATERALVIEWFORWARD, HASHTABLESINK, HASHTABLEDUMMY, PTF, MUX, DEMUX, EVENT, ORCFILEMERGE, RCFILEMERGE]).freeze + MERGEJOIN = 24 + VALUE_MAP = {0 => "JOIN", 1 => "MAPJOIN", 2 => "EXTRACT", 3 => "FILTER", 4 => "FORWARD", 5 => "GROUPBY", 6 => "LIMIT", 7 => "SCRIPT", 8 => "SELECT", 9 => "TABLESCAN", 10 => "FILESINK", 11 => "REDUCESINK", 12 => "UNION", 13 => "UDTF", 14 => "LATERALVIEWJOIN", 15 => "LATERALVIEWFORWARD", 16 => "HASHTABLESINK", 17 => "HASHTABLEDUMMY", 18 => "PTF", 19 => "MUX", 20 => "DEMUX", 21 => "EVENT", 22 => "ORCFILEMERGE", 23 => "RCFILEMERGE", 24 => "MERGEJOIN"} + VALID_VALUES = Set.new([JOIN, MAPJOIN, EXTRACT, FILTER, FORWARD, GROUPBY, LIMIT, SCRIPT, SELECT, TABLESCAN, FILESINK, REDUCESINK, UNION, UDTF, LATERALVIEWJOIN, LATERALVIEWFORWARD, HASHTABLESINK, HASHTABLEDUMMY, PTF, MUX, DEMUX, EVENT, ORCFILEMERGE, RCFILEMERGE, MERGEJOIN]).freeze end module TaskType diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 44d3c46..8c7266b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -390,6 +390,14 @@ public int compile(String command, boolean resetTaskIds) { tree = ParseUtils.findRootNonNullToken(tree); perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE); + // Initialize the transaction manager. This must be done before analyze is called. Also + // record the valid transactions for this query. We have to do this at compile time + // because we use the information in planning the query. Also, + // we want to record it at this point so that users see data valid at the point that they + // submit the query. + SessionState.get().initTxnMgr(conf); + recordValidTxns(); + perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE); BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(conf, tree); List saHooks = @@ -422,7 +430,8 @@ public int compile(String command, boolean resetTaskIds) { sem.validate(); perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE); - plan = new QueryPlan(command, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId); + plan = new QueryPlan(command, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId, + SessionState.get().getCommandType()); String queryStr = plan.getQueryStr(); conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr); @@ -870,28 +879,24 @@ public QueryPlan getPlan() { // Write the current set of valid transactions into the conf file so that it can be read by // the input format. 
- private int recordValidTxns() { - try { - ValidTxnList txns = SessionState.get().getTxnMgr().getValidTxns(); - String txnStr = txns.toString(); - conf.set(ValidTxnList.VALID_TXNS_KEY, txnStr); - LOG.debug("Encoding valid txns info " + txnStr); - return 0; - } catch (LockException e) { - errorMessage = "FAILED: Error in determing valid transactions: " + e.getMessage(); - SQLState = ErrorMsg.findSQLState(e.getMessage()); - downstreamError = e; - console.printError(errorMessage, "\n" - + org.apache.hadoop.util.StringUtils.stringifyException(e)); - return 10; - } + private void recordValidTxns() throws LockException { + ValidTxnList txns = SessionState.get().getTxnMgr().getValidTxns(); + String txnStr = txns.toString(); + conf.set(ValidTxnList.VALID_TXNS_KEY, txnStr); + LOG.debug("Encoding valid txns info " + txnStr); + // TODO I think when we switch to cross query transactions we need to keep this list in + // session state rather than aggressively encoding it in the conf like this. We can let the + // TableScanOperators then encode it in the conf before calling the input formats. } /** * Acquire read and write locks needed by the statement. The list of objects to be locked are - * obtained from he inputs and outputs populated by the compiler. The lock acuisition scheme is + * obtained from the inputs and outputs populated by the compiler. The lock acquisition scheme is * pretty simple. If all the locks cannot be obtained, error out. Deadlock is avoided by making * sure that the locks are lexicographically sorted. + * + * This method also records the list of valid transactions. This must be done after any + * transactions have been opened and locks acquired. **/ private int acquireLocksAndOpenTxn() { PerfLogger perfLogger = PerfLogger.getPerfLogger(); @@ -927,6 +932,9 @@ private int acquireLocksAndOpenTxn() { desc.setTransactionId(txnId); } } + + // TODO Once we move to cross query transactions we need to add the open transaction to + // our list of valid transactions. We don't have a way to do that right now.
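Taken together, the reworked recordValidTxns() and the new createValidTxnList(txns, currentTxn) amount to: snapshot the open transactions, drop the caller's own id from the exceptions list, and publish the result in the conf under ValidTxnList.VALID_TXNS_KEY. The following is a self-contained sketch of just the filtering step, written as a plain loop for clarity; the class and method names here are hypothetical, not Hive APIs:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

public class ValidTxnSketch {
  // Mirrors TxnHandler.createValidTxnList(txns, currentTxn): every open txn
  // except the caller's own is recorded as an "exception" (i.e. not valid).
  static long[] exceptionsFor(Set<Long> openTxns, long currentTxn) {
    List<Long> kept = new ArrayList<Long>();
    for (long txn : openTxns) {
      if (currentTxn > 0 && txn == currentTxn) {
        continue; // the caller should see its own transaction as valid
      }
      kept.add(txn);
    }
    long[] exceptions = new long[kept.size()];
    for (int i = 0; i < kept.size(); i++) {
      exceptions[i] = kept.get(i);
    }
    return exceptions;
  }

  public static void main(String[] args) {
    Set<Long> open = new TreeSet<Long>(Arrays.asList(7L, 9L, 12L));
    // A caller running inside txn 9 treats only 7 and 12 as invalid.
    System.out.println(Arrays.toString(exceptionsFor(open, 9L))); // [7, 12]
  }
}
```

Sizing the array after filtering is deliberately defensive here: the patch's fixed sizing of open.size() - (currentTxn > 0 ? 1 : 0) in TxnHandler assumes that a positive currentTxn always appears in the open set.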
} txnMgr.acquireLocks(plan, ctx, userFromUGI); @@ -1108,11 +1116,6 @@ private CommandProcessorResponse runInternal(String command, boolean alreadyComp SessionState ss = SessionState.get(); try { ckLock = checkConcurrency(); - try { - ss.initTxnMgr(conf); - } catch (LockException e) { - throw new SemanticException(e.getMessage(), e); - } } catch (SemanticException e) { errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage(); SQLState = ErrorMsg.findSQLState(e.getMessage()); @@ -1121,11 +1124,8 @@ private CommandProcessorResponse runInternal(String command, boolean alreadyComp + org.apache.hadoop.util.StringUtils.stringifyException(e)); return createProcessorResponse(10); } - int ret = recordValidTxns(); - if (ret != 0) { - return createProcessorResponse(ret); - } + int ret; if (!alreadyCompiled) { ret = compileInternal(command); if (ret != 0) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index 463e8fb..31978fe 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -417,6 +417,10 @@ "that implements AcidOutputFormat while transaction manager that supports ACID is in use"), VALUES_TABLE_CONSTRUCTOR_NOT_SUPPORTED(10296, "Values clause with table constructor not yet supported"), + ACID_OP_ON_NONACID_TABLE(10297, "Attempt to do update or delete on table {0} that does not use " + + "an AcidOutputFormat or is not bucketed", true), + ACID_NO_SORTED_BUCKETS(10298, "ACID insert, update, delete not supported on tables that are " + + "sorted, table {0}", true), //========================== 20000 range starts here ========================// SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java index 85d599a..8e1e6e2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java @@ -104,16 +104,14 @@ private QueryProperties queryProperties; private transient Long queryStartTime; + private String operationName; public QueryPlan() { this.reducerTimeStatsPerJobList = new ArrayList(); } - public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime) { - this(queryString, sem, startTime, null); - } - - public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, String queryId) { + public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, String queryId, + String operationName) { this.queryString = queryString; rootTasks = new ArrayList>(); @@ -134,6 +132,7 @@ public QueryPlan(String queryString, BaseSemanticAnalyzer sem, Long startTime, S query.putToQueryAttributes("queryString", this.queryString); queryProperties = sem.getQueryProperties(); queryStartTime = startTime; + this.operationName = operationName; } public String getQueryStr() { @@ -786,4 +785,8 @@ public Long getQueryStartTime() { public void setQueryStartTime(Long queryStartTime) { this.queryStartTime = queryStartTime; } + + public String getOperationName() { + return operationName; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java index a73256f..5dab171 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryProperties.java @@ -48,12 +48,37 @@ boolean mapJoinRemoved = false; boolean 
hasMapGroupBy = false; + private int noOfJoins = 0; + private int noOfOuterJoins = 0; + private boolean hasLateralViews; + + private boolean multiDestQuery; + private boolean filterWithSubQuery; + public boolean hasJoin() { - return hasJoin; + return (noOfJoins > 0); } - public void setHasJoin(boolean hasJoin) { - this.hasJoin = hasJoin; + public void incrementJoinCount(boolean outerJoin) { + noOfJoins++; + if (outerJoin) + noOfOuterJoins++; + } + + public int getJoinCount() { + return noOfJoins; + } + + public int getOuterJoinCount() { + return noOfOuterJoins; + } + + public void setHasLateralViews(boolean hasLateralViews) { + this.hasLateralViews = hasLateralViews; + } + + public boolean hasLateralViews() { + return hasLateralViews; } public boolean hasGroupBy() { @@ -144,6 +169,22 @@ public void setHasMapGroupBy(boolean hasMapGroupBy) { this.hasMapGroupBy = hasMapGroupBy; } + public boolean hasMultiDestQuery() { + return this.multiDestQuery; + } + + public void setMultiDestQuery(boolean multiDestQuery) { + this.multiDestQuery = multiDestQuery; + } + + public void setFilterWithSubQuery(boolean filterWithSubQuery) { + this.filterWithSubQuery = filterWithSubQuery; + } + + public boolean hasFilterWithSubQuery() { + return this.filterWithSubQuery; + } + public void clear() { hasJoin = false; hasGroupBy = false; @@ -160,5 +201,11 @@ public void clear() { hasClusterBy = false; mapJoinRemoved = false; hasMapGroupBy = false; + + noOfJoins = 0; + noOfOuterJoins = 0; + + multiDestQuery = false; + filterWithSubQuery = false; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java index 8c1067e..84b4a68 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractMapJoinOperator.java @@ -61,13 +61,13 @@ public AbstractMapJoinOperator(AbstractMapJoinOperator mj @Override @SuppressWarnings("unchecked") protected void initializeOp(Configuration hconf) throws HiveException { - int tagLen = conf.getTagLength(); - - joinKeys = new List[tagLen]; - - JoinUtil.populateJoinKeyValue(joinKeys, conf.getKeys(), NOTSKIPBIGTABLE); - joinKeysObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinKeys, - inputObjInspectors,NOTSKIPBIGTABLE, tagLen); + if (conf.getGenJoinKeys()) { + int tagLen = conf.getTagLength(); + joinKeys = new List[tagLen]; + JoinUtil.populateJoinKeyValue(joinKeys, conf.getKeys(), NOTSKIPBIGTABLE); + joinKeysObjectInspectors = JoinUtil.getObjectInspectorsFromEvaluators(joinKeys, + inputObjInspectors,NOTSKIPBIGTABLE, tagLen); + } super.initializeOp(hconf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java index 7315be5..b9be486 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AppMasterEventOperator.java @@ -44,10 +44,10 @@ @SuppressWarnings({ "deprecation", "serial" }) public class AppMasterEventOperator extends Operator { - private transient Serializer serializer; - private transient DataOutputBuffer buffer; - private transient boolean hasReachedMaxSize = false; - private transient long MAX_SIZE; + protected transient Serializer serializer; + protected transient DataOutputBuffer buffer; + protected transient boolean hasReachedMaxSize = false; + protected transient long MAX_SIZE; @Override public void 
initializeOp(Configuration hconf) throws HiveException { @@ -57,12 +57,9 @@ public void initializeOp(Configuration hconf) throws HiveException { initDataBuffer(false); } - private void initDataBuffer(boolean skipPruning) throws HiveException { + protected void initDataBuffer(boolean skipPruning) throws HiveException { buffer = new DataOutputBuffer(); try { - // where does this go to? - buffer.writeUTF(((TezContext) TezContext.get()).getTezProcessorContext().getTaskVertexName()); - // add any other header info getConf().writeEventHeader(buffer); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java index 3110b0a..8b3489f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java @@ -323,7 +323,6 @@ protected void initializeOp(Configuration hconf) throws HiveException { @Override public void startGroup() throws HiveException { - LOG.trace("Join: Starting new group"); newGroupStarted = true; for (AbstractRowContainer> alw : storage) { alw.clearRows(); @@ -632,8 +631,6 @@ protected final short getFilterTag(List row) { */ @Override public void endGroup() throws HiveException { - LOG.trace("Join Op: endGroup called: numValues=" + numAliases); - checkAndGenObject(); } @@ -719,7 +716,6 @@ protected void checkAndGenObject() throws HiveException { if (noOuterJoin) { if (alw.rowCount() == 0) { - LOG.trace("No data for alias=" + i); return; } else if (alw.rowCount() > 1) { mayHasMoreThanOne = true; @@ -776,7 +772,6 @@ protected void reportProgress() { */ @Override public void closeOp(boolean abort) throws HiveException { - LOG.trace("Join Op close"); for (AbstractRowContainer> alw : storage) { if (alw != null) { alw.clearRows(); // clean up the temp files diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java new file mode 100644 index 0000000..1d1405e --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java @@ -0,0 +1,507 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.persistence.RowContainer; +import org.apache.hadoop.hive.ql.exec.tez.RecordSource; +import org.apache.hadoop.hive.ql.exec.tez.TezContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.api.OperatorType; +import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.io.WritableComparable; +import org.apache.hadoop.io.WritableComparator; + +/* + * This operator is introduced to consolidate the join algorithms into either hash-based joins + * (MapJoinOperator) or sort-merge-based joins. It executes a sort-merge based algorithm and + * replaces both the JoinOperator and the SMBMapJoinOperator on the Tez side of things. It works + * in either the map phase or the reduce phase. + * + * The basic algorithm is as follows: + * + * 1. The processOp receives a row from a "big" table. + * 2. In order to process it, the operator does a fetch for rows from the other tables. + * 3. Once we have a set of rows from the other tables (until we hit a new key), more rows are + * brought in from the big table and a join is performed.
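To make the three steps above concrete, here is a tiny, self-contained sort-merge sketch over two key-sorted arrays. It is schematic only: the real operator is push-pull (rows arrive via processOp and peer inputs are pulled through RecordSource), joins N inputs, buffers whole key groups in RowContainers, and supports outer joins; none of that is modeled here.

```java
import java.util.ArrayList;
import java.util.List;

public class MergeJoinSketch {
  // Joins two lists of (key, value) pairs, each sorted by key, by advancing
  // whichever side currently holds the smaller key -- the same
  // group-at-a-time idea described in steps 1-3 above.
  static List<String> join(int[][] big, int[][] small) {
    List<String> out = new ArrayList<String>();
    int i = 0, j = 0;
    while (i < big.length && j < small.length) {
      int cmp = Integer.compare(big[i][0], small[j][0]);
      if (cmp < 0) {
        i++;  // big table's key is smallest: fetch its next group
      } else if (cmp > 0) {
        j++;  // small table lags: let it catch up
      } else {
        int key = big[i][0];
        int jStart = j;
        // emit the cross product of the two matching key groups
        for (; i < big.length && big[i][0] == key; i++) {
          for (j = jStart; j < small.length && small[j][0] == key; j++) {
            out.add(key + ": " + big[i][1] + "x" + small[j][1]);
          }
        }
      }
    }
    return out;
  }

  public static void main(String[] args) {
    int[][] big = {{1, 10}, {2, 20}, {2, 21}, {4, 40}};
    int[][] small = {{2, 200}, {3, 300}, {4, 400}};
    System.out.println(join(big, small)); // [2: 20x200, 2: 21x200, 4: 40x400]
  }
}
```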
+ */ + +public class CommonMergeJoinOperator extends AbstractMapJoinOperator implements + Serializable { + + private static final long serialVersionUID = 1L; + private boolean isBigTableWork; + private static final Log LOG = LogFactory.getLog(CommonMergeJoinOperator.class.getName()); + private Map aliasToInputNameMap; + transient List[] keyWritables; + transient List[] nextKeyWritables; + transient RowContainer>[] nextGroupStorage; + transient RowContainer>[] candidateStorage; + + transient String[] tagToAlias; + private transient boolean[] fetchDone; + private transient boolean[] foundNextKeyGroup; + transient boolean firstFetchHappened = false; + transient boolean localWorkInited = false; + transient boolean initDone = false; + transient List otherKey = null; + transient List values = null; + transient RecordSource[] sources; + transient List> originalParents = + new ArrayList>(); + + public CommonMergeJoinOperator() { + super(); + } + + @SuppressWarnings("unchecked") + @Override + public void initializeOp(Configuration hconf) throws HiveException { + super.initializeOp(hconf); + firstFetchHappened = false; + initializeChildren(hconf); + int maxAlias = 0; + for (byte pos = 0; pos < order.length; pos++) { + if (pos > maxAlias) { + maxAlias = pos; + } + } + maxAlias += 1; + + nextGroupStorage = new RowContainer[maxAlias]; + candidateStorage = new RowContainer[maxAlias]; + keyWritables = new ArrayList[maxAlias]; + nextKeyWritables = new ArrayList[maxAlias]; + fetchDone = new boolean[maxAlias]; + foundNextKeyGroup = new boolean[maxAlias]; + + int bucketSize; + + int oldVar = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEMAPJOINBUCKETCACHESIZE); + if (oldVar != 100) { + bucketSize = oldVar; + } else { + bucketSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVESMBJOINCACHEROWS); + } + + for (byte pos = 0; pos < order.length; pos++) { + RowContainer> rc = + JoinUtil.getRowContainer(hconf, rowContainerStandardObjectInspectors[pos], pos, + bucketSize, spillTableDesc, conf, !hasFilter(pos), reporter); + nextGroupStorage[pos] = rc; + RowContainer> candidateRC = + JoinUtil.getRowContainer(hconf, rowContainerStandardObjectInspectors[pos], pos, + bucketSize, spillTableDesc, conf, !hasFilter(pos), reporter); + candidateStorage[pos] = candidateRC; + } + + for (byte pos = 0; pos < order.length; pos++) { + if (pos != posBigTable) { + fetchDone[pos] = false; + } + foundNextKeyGroup[pos] = false; + } + + sources = ((TezContext) MapredContext.get()).getRecordSources(); + } + + /* + * (non-Javadoc) + * + * @see org.apache.hadoop.hive.ql.exec.Operator#processOp(java.lang.Object, + * int) this processor has a push-pull model. First call to this method is a + * push but the rest is pulled until we run out of records. + */ + @Override + public void processOp(Object row, int tag) throws HiveException { + posBigTable = (byte) conf.getBigTablePosition(); + + byte alias = (byte) tag; + List value = getFilteredValue(alias, row); + // compute keys and values as StandardObjects + List key = mergeJoinComputeKeys(row, alias); + + if (!firstFetchHappened) { + firstFetchHappened = true; + // fetch the first group for all small table aliases + for (byte pos = 0; pos < order.length; pos++) { + if (pos != posBigTable) { + fetchNextGroup(pos); + } + } + } + + //have we reached a new key group? 
+ boolean nextKeyGroup = processKey(alias, key); + if (nextKeyGroup) { + //assert this.nextGroupStorage[alias].size() == 0; + this.nextGroupStorage[alias].addRow(value); + foundNextKeyGroup[tag] = true; + if (tag != posBigTable) { + return; + } + } + + reportProgress(); + numMapRowsRead++; + + // the big table has reached a new key group. try to let the small tables + // catch up with the big table. + if (nextKeyGroup) { + assert tag == posBigTable; + List smallestPos = null; + do { + smallestPos = joinOneGroup(); + // jump out of the loop if we need input from the big table + } while (smallestPos != null && smallestPos.size() > 0 + && !smallestPos.contains(this.posBigTable)); + + return; + } + + assert !nextKeyGroup; + candidateStorage[tag].addRow(value); + + } + + private List joinOneGroup() throws HiveException { + int[] smallestPos = findSmallestKey(); + List listOfNeedFetchNext = null; + if (smallestPos != null) { + listOfNeedFetchNext = joinObject(smallestPos); + if (listOfNeedFetchNext.size() > 0) { + // listOfNeedFetchNext contains all tables whose candidateStorage data has been + // joined; we need to clear their candidate storage, promote their + // nextGroupStorage to candidateStorage, and fetch data until we reach a + // new group. + for (Byte b : listOfNeedFetchNext) { + try { + fetchNextGroup(b); + } catch (Exception e) { + throw new HiveException(e); + } + } + } + } + return listOfNeedFetchNext; + } + + private List joinObject(int[] smallestPos) throws HiveException { + List needFetchList = new ArrayList(); + byte index = (byte) (smallestPos.length - 1); + for (; index >= 0; index--) { + if (smallestPos[index] > 0 || keyWritables[index] == null) { + putDummyOrEmpty(index); + continue; + } + storage[index] = candidateStorage[index]; + needFetchList.add(index); + if (smallestPos[index] < 0) { + break; + } + } + for (index--; index >= 0; index--) { + putDummyOrEmpty(index); + } + checkAndGenObject(); + for (Byte pos : needFetchList) { + this.candidateStorage[pos].clearRows(); + this.keyWritables[pos] = null; + } + return needFetchList; + } + + private void putDummyOrEmpty(Byte i) { + // put an empty list or null + if (noOuterJoin) { + storage[i] = emptyList; + } else { + storage[i] = dummyObjVectors[i]; + } + } + + private int[] findSmallestKey() { + int[] result = new int[order.length]; + List smallestOne = null; + + for (byte pos = 0; pos < order.length; pos++) { + List key = keyWritables[pos]; + if (key == null) { + continue; + } + if (smallestOne == null) { + smallestOne = key; + result[pos] = -1; + continue; + } + result[pos] = compareKeys(key, smallestOne); + if (result[pos] < 0) { + smallestOne = key; + } + } + return smallestOne == null ? null : result; + } + + private void fetchNextGroup(Byte t) throws HiveException { + if (foundNextKeyGroup[t]) { + // first promote the next group to be the current group if we reached a + // new group in the previous fetch + if ((this.nextKeyWritables[t] != null) || (this.fetchDone[t] == false)) { + promoteNextGroupToCandidate(t); + } else { + this.keyWritables[t] = null; + this.candidateStorage[t] = null; + this.nextGroupStorage[t] = null; + } + foundNextKeyGroup[t] = false; + } + // for the big table, we only need to promote the next group to the current group. + if (t == posBigTable) { + return; + } + + // for tables other than the big table, we need to fetch more data until we reach a new group + // or are done.
+ while (!foundNextKeyGroup[t]) { + if (fetchDone[t]) { + break; + } + fetchOneRow(t); + } + if (!foundNextKeyGroup[t] && fetchDone[t]) { + this.nextKeyWritables[t] = null; + } + } + + @Override + public void closeOp(boolean abort) throws HiveException { + joinFinalLeftData(); + + // clean up + for (int pos = 0; pos < order.length; pos++) { + if (pos != posBigTable) { + fetchDone[pos] = false; + } + foundNextKeyGroup[pos] = false; + } + } + + private void fetchOneRow(byte tag) throws HiveException { + try { + fetchDone[tag] = !sources[tag].pushRecord(); + if (sources[tag].isGrouped()) { + // instead of maintaining complex state for the fetch of the next group, + // we know for sure that at the end of all the values for a given key, + // we will definitely reach the next key group. + foundNextKeyGroup[tag] = true; + } + } catch (Exception e) { + throw new HiveException(e); + } + } + + private void joinFinalLeftData() throws HiveException { + @SuppressWarnings("rawtypes") + RowContainer bigTblRowContainer = this.candidateStorage[this.posBigTable]; + + boolean allFetchDone = allFetchDone(); + // if all remaining data in the small tables is less than or equal to the data left + // in the big table, let them catch up + while (bigTblRowContainer != null && bigTblRowContainer.rowCount() > 0 && !allFetchDone) { + joinOneGroup(); + bigTblRowContainer = this.candidateStorage[this.posBigTable]; + allFetchDone = allFetchDone(); + } + + while (!allFetchDone) { + List ret = joinOneGroup(); + if (ret == null || ret.size() == 0) { + break; + } + reportProgress(); + numMapRowsRead++; + allFetchDone = allFetchDone(); + } + + boolean dataInCache = true; + while (dataInCache) { + for (byte pos = 0; pos < order.length; pos++) { + if (this.foundNextKeyGroup[pos] && this.nextKeyWritables[pos] != null) { + promoteNextGroupToCandidate(pos); + } + } + joinOneGroup(); + dataInCache = false; + for (byte pos = 0; pos < order.length; pos++) { + if (this.candidateStorage[pos].rowCount() > 0) { + dataInCache = true; + break; + } + } + } + } + + private boolean allFetchDone() { + boolean allFetchDone = true; + for (byte pos = 0; pos < order.length; pos++) { + if (pos == posBigTable) { + continue; + } + allFetchDone = allFetchDone && fetchDone[pos]; + } + return allFetchDone; + } + + private void promoteNextGroupToCandidate(Byte t) throws HiveException { + this.keyWritables[t] = this.nextKeyWritables[t]; + this.nextKeyWritables[t] = null; + RowContainer> oldRowContainer = this.candidateStorage[t]; + oldRowContainer.clearRows(); + this.candidateStorage[t] = this.nextGroupStorage[t]; + this.nextGroupStorage[t] = oldRowContainer; + } + + private boolean processKey(byte alias, List key) throws HiveException { + List keyWritable = keyWritables[alias]; + if (keyWritable == null) { + // the first group. + keyWritables[alias] = key; + return false; + } else { + int cmp = compareKeys(key, keyWritable); + if (cmp != 0) { + nextKeyWritables[alias] = key; + return true; + } + return false; + } + } + + @SuppressWarnings("rawtypes") + private int compareKeys(List k1, List k2) { + int ret = 0; + + // do the join keys have different sizes? + ret = k1.size() - k2.size(); + if (ret != 0) { + return ret; + } + + for (int i = 0; i < k1.size(); i++) { + WritableComparable key_1 = (WritableComparable) k1.get(i); + WritableComparable key_2 = (WritableComparable) k2.get(i); + if (key_1 == null && key_2 == null) { + return nullsafes != null && nullsafes[i] ?
0 : -1; // just report k1 as + // smaller than k2 + } else if (key_1 == null) { + return -1; + } else if (key_2 == null) { + return 1; + } + ret = WritableComparator.get(key_1.getClass()).compare(key_1, key_2); + if (ret != 0) { + return ret; + } + } + return ret; + } + + @SuppressWarnings("unchecked") + private List mergeJoinComputeKeys(Object row, Byte alias) throws HiveException { + if ((joinKeysObjectInspectors != null) && (joinKeysObjectInspectors[alias] != null)) { + return JoinUtil.computeKeys(row, joinKeys[alias], joinKeysObjectInspectors[alias]); + } else { + row = + ObjectInspectorUtils.copyToStandardObject(row, inputObjInspectors[alias], + ObjectInspectorCopyOption.WRITABLE); + StructObjectInspector soi = (StructObjectInspector) inputObjInspectors[alias]; + StructField sf = soi.getStructFieldRef(Utilities.ReduceField.KEY.toString()); + return (List) soi.getStructFieldData(row, sf); + } + } + + @Override + public String getName() { + return getOperatorName(); + } + + static public String getOperatorName() { + return "MERGEJOIN"; + } + + @Override + public OperatorType getType() { + return OperatorType.MERGEJOIN; + } + + @Override + public void initializeLocalWork(Configuration hconf) throws HiveException { + Operator parent = null; + + for (Operator parentOp : parentOperators) { + if (parentOp != null) { + parent = parentOp; + break; + } + } + + if (parent == null) { + throw new HiveException("No valid parents."); + } + Map dummyOps = parent.getTagToOperatorTree(); + for (Entry connectOp : dummyOps.entrySet()) { + parentOperators.add(connectOp.getKey(), connectOp.getValue()); + connectOp.getValue().getChildOperators().add(this); + } + super.initializeLocalWork(hconf); + return; + } + + public boolean isBigTableWork() { + return isBigTableWork; + } + + public void setIsBigTableWork(boolean bigTableWork) { + this.isBigTableWork = bigTableWork; + } + + public int getTagForOperator(Operator op) { + return originalParents.indexOf(op); + } + + public void cloneOriginalParentsList(List> opList) { + originalParents.addAll(opList); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 56bcf1c..d5374bc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -3275,19 +3275,21 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { } Table oldTbl = tbl.copy(); + List oldCols = (part == null ? tbl.getCols() : part.getCols()); + StorageDescriptor sd = (part == null ?
tbl.getTTable().getSd() : part.getTPartition().getSd()); if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) { tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName())); tbl.setTableName(Utilities.getTableName(alterTbl.getNewName())); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) { List newCols = alterTbl.getNewCols(); - List oldCols = tbl.getCols(); - if (tbl.getSerializationLib().equals( + String serializationLib = sd.getSerdeInfo().getSerializationLib(); + if (serializationLib.equals( "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) { console .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); - tbl.setSerializationLib(LazySimpleSerDe.class.getName()); - tbl.getTTable().getSd().setCols(newCols); + sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); + sd.setCols(newCols); } else { // make sure the columns does not already exist Iterator iterNewCols = newCols.iterator(); @@ -3303,10 +3305,9 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { } oldCols.add(newCol); } - tbl.getTTable().getSd().setCols(oldCols); + sd.setCols(oldCols); } } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) { - List oldCols = tbl.getCols(); List newCols = new ArrayList(); Iterator iterOldCols = oldCols.iterator(); String oldName = alterTbl.getOldColName(); @@ -3367,24 +3368,24 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { newCols.add(position, column); } - tbl.getTTable().getSd().setCols(newCols); - + sd.setCols(newCols); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) { // change SerDe to LazySimpleSerDe if it is columnsetSerDe - if (tbl.getSerializationLib().equals( + String serializationLib = sd.getSerdeInfo().getSerializationLib(); + if (serializationLib.equals( "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) { console .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); - tbl.setSerializationLib(LazySimpleSerDe.class.getName()); - } else if (!tbl.getSerializationLib().equals( + sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName()); + } else if (!serializationLib.equals( MetadataTypedColumnsetSerDe.class.getName()) - && !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName()) - && !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName()) - && !tbl.getSerializationLib().equals(DynamicSerDe.class.getName()) - && !tbl.getSerializationLib().equals(ParquetHiveSerDe.class.getName())) { + && !serializationLib.equals(LazySimpleSerDe.class.getName()) + && !serializationLib.equals(ColumnarSerDe.class.getName()) + && !serializationLib.equals(DynamicSerDe.class.getName()) + && !serializationLib.equals(ParquetHiveSerDe.class.getName())) { throw new HiveException(ErrorMsg.CANNOT_REPLACE_COLUMNS, alterTbl.getOldName()); } - tbl.getTTable().getSd().setCols(alterTbl.getNewCols()); + sd.setCols(alterTbl.getNewCols()); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) { tbl.getTTable().getParameters().putAll(alterTbl.getProps()); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.DROPPROPS) { @@ -3393,47 +3394,26 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { tbl.getTTable().getParameters().remove(keyItr.next()); } } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) { - if (part != null) { - 
part.getTPartition().getSd().getSerdeInfo().getParameters().putAll( - alterTbl.getProps()); - } else { - tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll( - alterTbl.getProps()); - } + sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps()); } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) { String serdeName = alterTbl.getSerdeName(); + sd.getSerdeInfo().setSerializationLib(serdeName); + if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) { + sd.getSerdeInfo().getParameters().putAll(alterTbl.getProps()); + } if (part != null) { - part.getTPartition().getSd().getSerdeInfo().setSerializationLib(serdeName); - if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) { - part.getTPartition().getSd().getSerdeInfo().getParameters().putAll( - alterTbl.getProps()); - } part.getTPartition().getSd().setCols(part.getTPartition().getSd().getCols()); } else { - tbl.setSerializationLib(alterTbl.getSerdeName()); - if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) { - tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll( - alterTbl.getProps()); - } if (!Table.hasMetastoreBasedSchema(conf, serdeName)) { tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl. getDeserializer())); } } } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) { - if(part != null) { - part.getTPartition().getSd().setInputFormat(alterTbl.getInputFormat()); - part.getTPartition().getSd().setOutputFormat(alterTbl.getOutputFormat()); - if (alterTbl.getSerdeName() != null) { - part.getTPartition().getSd().getSerdeInfo().setSerializationLib( - alterTbl.getSerdeName()); - } - } else { - tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat()); - tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat()); - if (alterTbl.getSerdeName() != null) { - tbl.setSerializationLib(alterTbl.getSerdeName()); - } + sd.setInputFormat(alterTbl.getInputFormat()); + sd.setOutputFormat(alterTbl.getOutputFormat()); + if (alterTbl.getSerdeName() != null) { + sd.getSerdeInfo().setSerializationLib(alterTbl.getSerdeName()); } } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERPROTECTMODE) { boolean protectModeEnable = alterTbl.isProtectModeEnable(); @@ -3463,8 +3443,6 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { .getColumnNamesFromSortCols(alterTbl.getSortColumns())); } - StorageDescriptor sd = part == null ? 
tbl.getTTable().getSd() : part.getTPartition().getSd(); - if (alterTbl.isTurnOffSorting()) { sd.setSortCols(new ArrayList()); } else if (alterTbl.getNumberBuckets() == -1) { @@ -3485,11 +3463,7 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { || locUri.getScheme().trim().equals("")) { throw new HiveException(ErrorMsg.BAD_LOCATION_VALUE, newLocation); } - if (part != null) { - part.setLocation(newLocation); - } else { - tbl.setDataLocation(new Path(locUri)); - } + sd.setLocation(newLocation); } catch (URISyntaxException e) { throw new HiveException(e); } @@ -3689,7 +3663,7 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc } // drop the table - db.dropTable(dropTbl.getTableName()); + db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge()); if (tbl != null) { // We have already locked the table in DDLSemanticAnalyzer, don't do it again here work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); @@ -4233,7 +4207,7 @@ private boolean needToUpdateStats(Map props) { String statVal = props.get(stat); if (statVal != null && Long.parseLong(statVal) > 0) { statsPresent = true; - props.put(statVal, "0"); + props.put(stat, "0"); props.put(StatsSetupConst.COLUMN_STATS_ACCURATE, "false"); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java index b8f5227..2b15c83 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java @@ -65,7 +65,7 @@ */ public class DummyStoreOperator extends Operator implements Serializable { - private transient InspectableObject result; + protected transient InspectableObject result; public DummyStoreOperator() { super(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java index bb5f4f3..97ee976 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java @@ -45,6 +45,7 @@ transient ExprNodeEvaluator[] children; transient GenericUDF.DeferredObject[] deferredChildren; transient boolean isEager; + transient boolean isConstant = false; /** * Class to allow deferred evaluation for GenericUDF. @@ -124,7 +125,10 @@ public ObjectInspector initialize(ObjectInspector rowInspector) throws HiveExcep if (context != null) { context.setup(genericUDF); } - return outputOI = genericUDF.initializeAndFoldConstants(childrenOIs); + outputOI = genericUDF.initializeAndFoldConstants(childrenOIs); + isConstant = ObjectInspectorUtils.isConstantObjectInspector(outputOI) + && isDeterministic(); + return outputOI; } @Override @@ -154,12 +158,11 @@ public boolean isStateful() { @Override protected Object _evaluate(Object row, int version) throws HiveException { - rowObject = row; - if (ObjectInspectorUtils.isConstantObjectInspector(outputOI) && - isDeterministic()) { + if (isConstant) { // The output of this UDF is constant, so don't even bother evaluating. 
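The ExprNodeGenericFuncEvaluator hunk above hoists the "is this expression a folded constant?" test out of the per-row _evaluate path: the verdict is computed once in initialize and cached in the new isConstant flag. A minimal sketch of the pattern, with illustrative names rather than Hive's actual evaluator API:

  // Hoist an invariant check out of the hot path: decide once at init time
  // whether the expression folds to a constant, then short-circuit per row.
  abstract class CachingEvaluator {
    private boolean isConstant;   // cached verdict, never re-checked per row
    private Object constantValue;

    final void initialize() {
      constantValue = tryFoldToConstant();
      // Only trust the fold if the function is deterministic; a
      // non-deterministic UDF must still run for every row.
      isConstant = (constantValue != null) && isDeterministic();
    }

    final Object evaluate(Object row) {
      if (isConstant) {
        return constantValue;     // no per-row inspector or determinism checks
      }
      return evaluateRow(row);
    }

    abstract Object tryFoldToConstant();  // null if not foldable
    abstract boolean isDeterministic();
    abstract Object evaluateRow(Object row);
  }

Note the diff also moves the rowObject assignment below the short-circuit, so a constant expression never touches the incoming row at all.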
- return ((ConstantObjectInspector)outputOI).getWritableConstantValue(); + return ((ConstantObjectInspector) outputOI).getWritableConstantValue(); } + rowObject = row; for (int i = 0; i < deferredChildren.length; i++) { deferredChildren[i].prepare(version); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index b0de749..8422782 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -165,7 +165,7 @@ private void initialize() { private void setupExecContext() { if (hasVC || work.getSplitSample() != null) { - context = new ExecMapperContext(); + context = new ExecMapperContext(job); if (operator != null) { operator.setExecContext(context); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 3ff0782..e02071c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -29,6 +29,8 @@ import java.util.Map; import java.util.Set; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -38,13 +40,13 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.io.AcidUtils; -import org.apache.hadoop.hive.ql.io.RecordUpdater; -import org.apache.hadoop.hive.ql.io.StatsProvidingRecordWriter; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveKey; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.io.HivePartitioner; import org.apache.hadoop.hive.ql.io.HivePassThroughOutputFormat; +import org.apache.hadoop.hive.ql.io.RecordUpdater; +import org.apache.hadoop.hive.ql.io.StatsProvidingRecordWriter; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveFatalException; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; @@ -72,14 +74,16 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.util.ReflectionUtils; -import com.google.common.collect.Lists; - /** * File Sink operator implementation. 
**/ public class FileSinkOperator extends TerminalOperator implements Serializable { + public static final Log LOG = LogFactory.getLog(FileSinkOperator.class); + private static final boolean isInfoEnabled = LOG.isInfoEnabled(); + private static final boolean isDebugEnabled = LOG.isDebugEnabled(); + protected transient HashMap valToPaths; protected transient int numDynParts; protected transient List dpColNames; @@ -101,10 +105,6 @@ protected transient boolean isCollectRWStats; private transient FSPaths prevFsp; private transient FSPaths fpaths; - private transient ObjectInspector keyOI; - private transient List keyWritables; - private transient List keys; - private transient int numKeyColToRead; private StructField recIdField; // field to find record identifier in private StructField bucketField; // field bucket is in in record id private StructObjectInspector recIdInspector; // OI for inspecting record id @@ -131,9 +131,6 @@ int acidLastBucket = -1; int acidFileOffset = -1; - public FSPaths() { - } - public FSPaths(Path specPath) { tmpPath = Utilities.toTempPath(specPath); taskOutputTempPath = Utilities.toTaskTempPath(specPath); @@ -141,7 +138,9 @@ public FSPaths(Path specPath) { finalPaths = new Path[numFiles]; outWriters = new RecordWriter[numFiles]; updaters = new RecordUpdater[numFiles]; - LOG.debug("Created slots for " + numFiles); + if (isDebugEnabled) { + LOG.debug("Created slots for " + numFiles); + } stat = new Stat(); } @@ -326,7 +325,6 @@ protected void initializeOp(Configuration hconf) throws HiveException { parent = Utilities.toTempPath(conf.getDirName()); statsCollectRawDataSize = conf.isStatsCollectRawDataSize(); statsFromRecordWriter = new boolean[numFiles]; - serializer = (Serializer) conf.getTableInfo().getDeserializerClass().newInstance(); serializer.initialize(null, conf.getTableInfo().getProperties()); outputClass = serializer.getSerializedClass(); @@ -363,20 +361,6 @@ protected void initializeOp(Configuration hconf) throws HiveException { lbSetup(); } - int numPart = 0; - int numBuck = 0; - if (conf.getPartitionCols() != null && !conf.getPartitionCols().isEmpty()) { - numPart = conf.getPartitionCols().size(); - } - - // bucket number will exists only in PARTITION_BUCKET_SORTED mode - if (conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) { - numBuck = 1; - } - numKeyColToRead = numPart + numBuck; - keys = Lists.newArrayListWithCapacity(numKeyColToRead); - keyWritables = Lists.newArrayListWithCapacity(numKeyColToRead); - if (!bDynParts) { fsp = new FSPaths(specPath); @@ -423,7 +407,8 @@ private void dpSetup() { this.dpColNames = dpCtx.getDPColNames(); this.maxPartitions = dpCtx.getMaxPartitionsPerNode(); - assert numDynParts == dpColNames.size() : "number of dynamic paritions should be the same as the size of DP mapping"; + assert numDynParts == dpColNames.size() + : "number of dynamic partitions should be the same as the size of DP mapping"; if (dpColNames != null && dpColNames.size() > 0) { this.bDynParts = true; @@ -441,6 +426,9 @@ private void dpSetup() { newFieldsOI.add(sf.getFieldObjectInspector()); newFieldsName.add(sf.getFieldName()); this.dpStartCol++; + } else { + // once we have found the start column for the partition columns we are done + break; } } assert newFieldsOI.size() > 0 : "new Fields ObjectInspector is empty"; @@ -457,11 +445,15 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException { Set seenBuckets = new HashSet(); for (int idx = 0; idx < totalFiles; idx++) { if (this.getExecContext() != null &&
this.getExecContext().getFileId() != null) { - LOG.info("replace taskId from execContext "); + if (isInfoEnabled) { + LOG.info("replace taskId from execContext "); + } taskId = Utilities.replaceTaskIdFromFilename(taskId, this.getExecContext().getFileId()); - LOG.info("new taskId: FS " + taskId); + if (isInfoEnabled) { + LOG.info("new taskId: FS " + taskId); + } assert !multiFileSpray; assert totalFiles == 1; @@ -515,9 +507,13 @@ protected void createBucketForFileIdx(FSPaths fsp, int filesIdx) try { if (isNativeTable) { fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, null); - LOG.info("Final Path: FS " + fsp.finalPaths[filesIdx]); + if (isInfoEnabled) { + LOG.info("Final Path: FS " + fsp.finalPaths[filesIdx]); + } fsp.outPaths[filesIdx] = fsp.getTaskOutPath(taskId); - LOG.info("Writing to temp file: FS " + fsp.outPaths[filesIdx]); + if (isInfoEnabled) { + LOG.info("Writing to temp file: FS " + fsp.outPaths[filesIdx]); + } } else { fsp.finalPaths[filesIdx] = fsp.outPaths[filesIdx] = specPath; } @@ -532,7 +528,9 @@ protected void createBucketForFileIdx(FSPaths fsp, int filesIdx) fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, extension); } - LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]); + if (isInfoEnabled) { + LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]); + } if (isNativeTable) { // in recent hadoop versions, use deleteOnExit to clean tmp files. @@ -604,14 +602,22 @@ public void processOp(Object row, int tag) throws HiveException { updateProgress(); // if DP is enabled, get the final output writers and prepare the real output row - assert inputObjInspectors[0].getCategory() == ObjectInspector.Category.STRUCT : "input object inspector is not struct"; + assert inputObjInspectors[0].getCategory() == ObjectInspector.Category.STRUCT + : "input object inspector is not struct"; if (bDynParts) { + + // we need to read bucket number which is the last column in value (after partition columns) + if (conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) { + numDynParts += 1; + } + // copy the DP column values from the input row to dpVals dpVals.clear(); dpWritables.clear(); - ObjectInspectorUtils.partialCopyToStandardObject(dpWritables, row, dpStartCol, numDynParts, - (StructObjectInspector) inputObjInspectors[0], ObjectInspectorCopyOption.WRITABLE); + ObjectInspectorUtils.partialCopyToStandardObject(dpWritables, row, dpStartCol,numDynParts, + (StructObjectInspector) inputObjInspectors[0],ObjectInspectorCopyOption.WRITABLE); + // get a set of RecordWriter based on the DP column values // pass the null value along to the escaping process to determine what the dir should be for (Object o : dpWritables) { @@ -621,16 +627,11 @@ public void processOp(Object row, int tag) throws HiveException { dpVals.add(o.toString()); } } - // use SubStructObjectInspector to serialize the non-partitioning columns in the input row - recordValue = serializer.serialize(row, subSetOI); - // when dynamic partition sorting is not used, the DPSortState will be NONE - // in which we will fall back to old method of file system path creation - // i.e, having as many record writers as distinct values in partition column - if (conf.getDpSortState().equals(DPSortState.NONE)) { - fpaths = getDynOutPaths(dpVals, lbDirName); - } + fpaths = getDynOutPaths(dpVals, lbDirName); + // use SubStructObjectInspector to serialize the non-partitioning columns in the input row + recordValue = serializer.serialize(row, subSetOI); } else { if (lbDirName != null) { fpaths = 
lookupListBucketingPaths(lbDirName); @@ -686,8 +687,10 @@ public void processOp(Object row, int tag) throws HiveException { fpaths.updaters[++fpaths.acidFileOffset] = HiveFileFormatUtils.getAcidRecordUpdater( jc, conf.getTableInfo(), bucketNum, conf, fpaths.outPaths[fpaths.acidFileOffset], rowInspector, reporter, 0); - LOG.debug("Created updater for bucket number " + bucketNum + " using file " + - fpaths.outPaths[fpaths.acidFileOffset]); + if (isDebugEnabled) { + LOG.debug("Created updater for bucket number " + bucketNum + " using file " + + fpaths.outPaths[fpaths.acidFileOffset]); + } } if (conf.getWriteType() == AcidUtils.Operation.UPDATE) { @@ -834,10 +837,8 @@ protected FSPaths getDynOutPaths(List row, String lbDirName) throws Hive if (dpDir != null) { dpDir = appendToSource(lbDirName, dpDir); pathKey = dpDir; - int numericBucketNum = 0; if(conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) { String buckNum = row.get(row.size() - 1); - numericBucketNum = Integer.valueOf(buckNum); taskId = Utilities.replaceTaskIdFromFilename(Utilities.getTaskId(hconf), buckNum); pathKey = appendToSource(taskId, dpDir); } @@ -918,26 +919,6 @@ private String getDynPartDirectory(List row, List dpColNames) { } @Override - public void startGroup() throws HiveException { - if (!conf.getDpSortState().equals(DPSortState.NONE)) { - keyOI = getGroupKeyObjectInspector(); - keys.clear(); - keyWritables.clear(); - ObjectInspectorUtils.partialCopyToStandardObject(keyWritables, getGroupKeyObject(), 0, - numKeyColToRead, (StructObjectInspector) keyOI, ObjectInspectorCopyOption.WRITABLE); - - for (Object o : keyWritables) { - if (o == null || o.toString().length() == 0) { - keys.add(dpCtx.getDefaultPartitionName()); - } else { - keys.add(o.toString()); - } - } - fpaths = getDynOutPaths(keys, null); - } - } - - @Override public void closeOp(boolean abort) throws HiveException { if (!bDynParts && !filesCreated) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java index 516ba42..0f818f5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java @@ -76,7 +76,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { statsMap.put(Counter.FILTERED, filtered_count); statsMap.put(Counter.PASSED, passed_count); conditionInspector = null; - ioContext = IOContext.get(); + ioContext = IOContext.get(hconf); } catch (Throwable e) { throw new HiveException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java index 80b7420..d047b25 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java @@ -639,6 +639,14 @@ private static void loadFunctionResourcesIfNecessary(String functionName, Common } } + public static String getNormalizedFunctionName(String fn) { + // Does the same thing as getFunctionInfo, except for getting the function info. + fn = fn.toLowerCase(); + return (FunctionUtils.isQualifiedFunctionName(fn) || mFunctions.get(fn) != null) ? 
fn + : FunctionUtils.qualifyFunctionName( + fn, SessionState.get().getCurrentDatabase().toLowerCase()); + } + private static T getFunctionInfo( Map mFunctions, String functionName) { functionName = functionName.toLowerCase(); @@ -861,15 +869,7 @@ public static TypeInfo getTypeInfoForPrimitiveCategory( TypeInfoUtils.getCharacterLengthForType(b)); return TypeInfoFactory.getVarcharTypeInfo(maxLength); case DECIMAL: - int prec1 = HiveDecimalUtils.getPrecisionForType(a); - int prec2 = HiveDecimalUtils.getPrecisionForType(b); - int scale1 = HiveDecimalUtils.getScaleForType(a); - int scale2 = HiveDecimalUtils.getScaleForType(b); - int intPart = Math.max(prec1 - scale1, prec2 - scale2); - int decPart = Math.max(scale1, scale2); - int prec = Math.min(intPart + decPart, HiveDecimal.MAX_PRECISION); - int scale = Math.min(decPart, HiveDecimal.MAX_PRECISION - intPart); - return TypeInfoFactory.getDecimalTypeInfo(prec, scale); + return HiveDecimalUtils.getDecimalTypeForPrimitiveCategories(a, b); default: // Type doesn't require any qualifiers. return TypeInfoFactory.getPrimitiveTypeInfo( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java index 792d87f..4632f08 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java @@ -77,6 +77,7 @@ private static final Log LOG = LogFactory.getLog(GroupByOperator.class .getName()); + private static final boolean isTraceEnabled = LOG.isTraceEnabled(); private static final long serialVersionUID = 1L; private static final int NUMROWSESTIMATESIZE = 1000; @@ -101,6 +102,7 @@ transient ExprNodeEvaluator unionExprEval = null; transient GenericUDAFEvaluator[] aggregationEvaluators; + transient boolean[] estimableAggregationEvaluators; protected transient ArrayList objectInspectors; transient ArrayList fieldNames; @@ -442,10 +444,10 @@ private void computeMaxEntriesHashAggr(Configuration hconf) throws HiveException estimateRowSize(); } - private static final int javaObjectOverHead = 64; - private static final int javaHashEntryOverHead = 64; - private static final int javaSizePrimitiveType = 16; - private static final int javaSizeUnknownType = 256; + public static final int javaObjectOverHead = 64; + public static final int javaHashEntryOverHead = 64; + public static final int javaSizePrimitiveType = 16; + public static final int javaSizeUnknownType = 256; /** * The size of the element at position 'pos' is returned, if possible. If the @@ -557,11 +559,13 @@ private void estimateRowSize() throws HiveException { // Go over all the aggregation classes and and get the size of the fields of // fixed length. Keep track of the variable length // fields in these aggregation classes. 
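The GroupByOperator change declared just above and populated in the lines that follow precomputes, per aggregation evaluator, whether its buffer can report its own memory footprint (GenericUDAFEvaluator.isEstimable), so the per-row flush check reads a cached boolean instead of repeating the lookup. A hedged sketch of the idea, with stand-in types rather than Hive's evaluator classes:

  // Precompute once which buffers are self-estimating, then branch on the
  // cached flag in the hot path instead of re-inspecting each buffer.
  interface SizeEstimable { long estimate(); }

  final class BufferSizer {
    private final boolean[] estimable;

    BufferSizer(Object[] sampleBuffers) {
      estimable = new boolean[sampleBuffers.length];
      for (int i = 0; i < sampleBuffers.length; i++) {
        estimable[i] = sampleBuffers[i] instanceof SizeEstimable;
      }
    }

    long variableSize(Object[] buffers) {
      long total = 0;
      for (int i = 0; i < buffers.length; i++) {
        total += estimable[i]
            ? ((SizeEstimable) buffers[i]).estimate() // cheap, cached verdict
            : fallbackEstimate(buffers[i]);           // e.g. a reflective field walk
      }
      return total;
    }

    private long fallbackEstimate(Object buffer) {
      return 256; // placeholder for the slower reflective fallback
    }
  }

In the actual hunk the flag is set where getNewAggregationBuffer() is first called during row-size estimation, so the extra pass costs nothing.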
+ estimableAggregationEvaluators = new boolean[aggregationEvaluators.length]; for (int i = 0; i < aggregationEvaluators.length; i++) { fixedRowSize += javaObjectOverHead; AggregationBuffer agg = aggregationEvaluators[i].getNewAggregationBuffer(); if (GenericUDAFEvaluator.isEstimable(agg)) { + estimableAggregationEvaluators[i] = true; continue; } Field[] fArr = ObjectInspectorUtils.getDeclaredNonStaticFields(agg.getClass()); @@ -765,10 +769,12 @@ public void processOp(Object row, int tag) throws HiveException { flushHashTable(true); hashAggr = false; } else { - LOG.trace("Hash Aggr Enabled: #hash table = " + numRowsHashTbl - + " #total = " + numRowsInput + " reduction = " + 1.0 - * (numRowsHashTbl / numRowsInput) + " minReduction = " - + minReductionHashAggr); + if (isTraceEnabled) { + LOG.trace("Hash Aggr Enabled: #hash table = " + numRowsHashTbl + + " #total = " + numRowsInput + " reduction = " + 1.0 + * (numRowsHashTbl / numRowsInput) + " minReduction = " + + minReductionHashAggr); + } } } } @@ -952,7 +958,7 @@ private boolean shouldBeFlushed(KeyWrapper newKeys) { AggregationBuffer[] aggs = hashAggregations.get(newKeys); for (int i = 0; i < aggs.length; i++) { AggregationBuffer agg = aggs[i]; - if (GenericUDAFEvaluator.isEstimable(agg)) { + if (estimableAggregationEvaluators[i]) { totalVariableSize += ((GenericUDAFEvaluator.AbstractAggregationBuffer)agg).estimate(); continue; } @@ -966,8 +972,10 @@ private boolean shouldBeFlushed(KeyWrapper newKeys) { // Update the number of entries that can fit in the hash table numEntriesHashTable = (int) (maxHashTblMemory / (fixedRowSize + (totalVariableSize / numEntriesVarSize))); - LOG.trace("Hash Aggr: #hash table = " + numEntries - + " #max in hash table = " + numEntriesHashTable); + if (isTraceEnabled) { + LOG.trace("Hash Aggr: #hash table = " + numEntries + + " #max in hash table = " + numEntriesHashTable); + } } // flush if necessary diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java index ef0c055..e3877d9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java @@ -171,8 +171,9 @@ public void generateMapMetaData() throws HiveException, SerDeException { private void loadHashTable() throws HiveException { - if (this.getExecContext().getLocalWork() == null - || !this.getExecContext().getLocalWork().getInputFileChangeSensitive()) { + if ((this.getExecContext() != null) + && ((this.getExecContext().getLocalWork() == null) || (!this.getExecContext() + .getLocalWork().getInputFileChangeSensitive()))) { if (hashTblInitedOnce) { return; } else { @@ -313,8 +314,8 @@ public void closeOp(boolean abort) throws HiveException { tableContainer.dumpMetrics(); } } - if ((this.getExecContext().getLocalWork() != null - && this.getExecContext().getLocalWork().getInputFileChangeSensitive()) + if ((this.getExecContext() != null) && (this.getExecContext().getLocalWork() != null) + && (this.getExecContext().getLocalWork().getInputFileChangeSensitive()) && mapJoinTables != null) { for (MapJoinTableContainer tableContainer : mapJoinTables) { if (tableContainer != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java index b1f8358..b702509 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java @@ -33,9 +33,10 @@ import 
org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; -import org.apache.hadoop.hive.ql.io.IOContext; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; +import org.apache.hadoop.hive.ql.exec.tez.MapRecordProcessor; import org.apache.hadoop.hive.ql.io.RecordIdentifier; +import org.apache.hadoop.hive.ql.io.IOContext; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.plan.MapWork; @@ -181,7 +182,7 @@ private MapOpCtx initObjectInspector(Configuration hconf, MapInputPath ctx, PartitionDesc pd = ctx.partDesc; TableDesc td = pd.getTableDesc(); - + MapOpCtx opCtx = new MapOpCtx(); // Use table properties in case of unpartitioned tables, // and the union of table properties and partition properties, with partition @@ -205,42 +206,42 @@ private MapOpCtx initObjectInspector(Configuration hconf, MapInputPath ctx, opCtx.partTblObjectInspectorConverter = ObjectInspectorConverters.getConverter( partRawRowObjectInspector, opCtx.tblRawRowObjectInspector); - + // Next check if this table has partitions and if so // get the list of partition names as well as allocate // the serdes for the partition columns String pcols = overlayedProps.getProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS); - + if (pcols != null && pcols.length() > 0) { String[] partKeys = pcols.trim().split("/"); String pcolTypes = overlayedProps .getProperty(hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES); String[] partKeyTypes = pcolTypes.trim().split(":"); - + if (partKeys.length > partKeyTypes.length) { throw new HiveException("Internal error : partKeys length, " +partKeys.length + " greater than partKeyTypes length, " + partKeyTypes.length); } - + List partNames = new ArrayList(partKeys.length); Object[] partValues = new Object[partKeys.length]; List partObjectInspectors = new ArrayList(partKeys.length); - + for (int i = 0; i < partKeys.length; i++) { String key = partKeys[i]; partNames.add(key); ObjectInspector oi = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector (TypeInfoFactory.getPrimitiveTypeInfo(partKeyTypes[i])); - + // Partitions do not exist for this table if (partSpec == null) { // for partitionless table, initialize partValue to null partValues[i] = null; } else { - partValues[i] = + partValues[i] = ObjectInspectorConverters. getConverter(PrimitiveObjectInspectorFactory. 
- javaStringObjectInspector, oi).convert(partSpec.get(key)); + javaStringObjectInspector, oi).convert(partSpec.get(key)); } partObjectInspectors.add(oi); } @@ -337,13 +338,8 @@ else if (partRawRowObjectInspector.equals(tblRawRowObjectInspector)) { return tableDescOI; } - private boolean isPartitioned(PartitionDesc pd) { - return pd.getPartSpec() != null && !pd.getPartSpec().isEmpty(); - } - public void setChildren(Configuration hconf) throws HiveException { - - Path fpath = IOContext.get().getInputPath(); + Path fpath = IOContext.get(hconf).getInputPath(); boolean schemeless = fpath.toUri().getScheme() == null; @@ -639,4 +635,8 @@ public OperatorType getType() { return null; } + @Override + public Map getTagToOperatorTree() { + return MapRecordProcessor.getConnectOps(); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 0d9c1a6..583b82b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -353,6 +353,7 @@ public int execute(DriverContext driverContext) { pushFeed(FeedType.DYNAMIC_PARTITIONS, dps); } + long startTime = System.currentTimeMillis(); // load the list of DP partitions and return the list of partition specs // TODO: In a follow-up to HIVE-1361, we should refactor loadDynamicPartitions // to use Utilities.getFullDPSpecs() to get the list of full partSpecs. @@ -360,7 +361,7 @@ public int execute(DriverContext driverContext) { // iterate over it and call loadPartition() here. // The reason we don't do inside HIVE-1361 is the latter is large and we // want to isolate any potential issue it may introduce. - ArrayList> dp = + Map, Partition> dp = db.loadDynamicPartitions( tbd.getSourcePath(), tbd.getTable().getTableName(), @@ -370,16 +371,19 @@ public int execute(DriverContext driverContext) { tbd.getHoldDDLTime(), isSkewedStoredAsDirs(tbd), work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID); + console.printInfo("\t Time taken for load dynamic partitions : " + + (System.currentTimeMillis() - startTime)); if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) { throw new HiveException("This query creates no partitions." + " To turn off this error, set hive.error.on.empty.partition=false."); } + startTime = System.currentTimeMillis(); // for each partition spec, get the partition // and put it to WriteEntity for post-exec hook - for (LinkedHashMap partSpec: dp) { - Partition partn = db.getPartition(table, partSpec, false); + for(Map.Entry, Partition> entry : dp.entrySet()) { + Partition partn = entry.getValue(); if (bucketCols != null || sortCols != null) { updatePartitionBucketSortColumns(table, partn, bucketCols, numBuckets, sortCols); @@ -412,8 +416,10 @@ public int execute(DriverContext driverContext) { table.getCols()); } - console.printInfo("\tLoading partition " + partSpec); + console.printInfo("\tLoading partition " + entry.getKey()); } + console.printInfo("\t Time taken for adding to write entity : " + + (System.currentTimeMillis() - startTime)); dc = null; // reset data container to prevent it being added again. 
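Two things change in the MoveTask hunk above: loadDynamicPartitions now hands back the Partition objects keyed by their partition specs, so the loop no longer pays a db.getPartition metastore round trip per partition, and each phase is bracketed with wall-clock timing printed to the console. A small sketch of the timing idiom, with a generic helper standing in for the inlined System.currentTimeMillis bookkeeping (Timed and the usage names are illustrative, not Hive API):

  import java.util.concurrent.Callable;

  // Time a named step and report elapsed milliseconds, mirroring the
  // "Time taken for ..." console lines the patch adds.
  final class Timed {
    static <T> T step(String label, Callable<T> work) throws Exception {
      long start = System.currentTimeMillis();
      T result = work.call();
      System.out.println("\t Time taken for " + label + " : "
          + (System.currentTimeMillis() - start));
      return result;
    }
  }

  // Hypothetical usage, shaped like the patched MoveTask flow:
  //   Map<Map<String, String>, Partition> dp =
  //       Timed.step("load dynamic partitions", loadCall);
  //   Timed.step("adding to write entity", registerCall);

Returning the loaded partitions directly is the bigger win: with N dynamic partitions, the old code issued N extra metastore lookups purely to rebuild objects the load call already had in hand.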
} else { // static partitions List partVals = MetaStoreUtils.getPvals(table.getPartCols(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java index db94271..3dc7c76 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java @@ -146,6 +146,7 @@ public int getNumChild() { /** * Implements the getChildren function for the Node Interface. */ + @Override public ArrayList getChildren() { if (getChildOperators() == null) { @@ -497,8 +498,6 @@ protected final void defaultStartGroup() throws HiveException { LOG.debug("Starting group for children:"); for (Operator op : childOperators) { - op.setGroupKeyObjectInspector(groupKeyOI); - op.setGroupKeyObject(groupKeyObject); op.startGroup(); } @@ -851,6 +850,7 @@ public void logStats() { * * @return the name of the operator */ + @Override public String getName() { return getOperatorName(); } @@ -968,7 +968,6 @@ protected static StructObjectInspector initEvaluatorsAndReturnStruct( } protected transient Object groupKeyObject; - protected transient ObjectInspector groupKeyOI; public String getOperatorId() { return operatorId; @@ -1061,7 +1060,7 @@ public boolean supportSkewJoinOptimization() { if (parents != null) { for (Operator parent : parents) { - parentClones.add((Operator)(parent.clone())); + parentClones.add((parent.clone())); } } @@ -1082,8 +1081,8 @@ public boolean supportSkewJoinOptimization() { public Operator cloneOp() throws CloneNotSupportedException { T descClone = (T) conf.clone(); Operator ret = - (Operator) OperatorFactory.getAndMakeChild( - descClone, getSchema()); + OperatorFactory.getAndMakeChild( + descClone, getSchema()); return ret; } @@ -1254,15 +1253,15 @@ public Statistics getStatistics() { } return null; } - + public OpTraits getOpTraits() { if (conf != null) { return conf.getOpTraits(); } - + return null; } - + public void setOpTraits(OpTraits metaInfo) { if (LOG.isDebugEnabled()) { LOG.debug("Setting traits ("+metaInfo+") on "+this); @@ -1285,21 +1284,23 @@ public void setStatistics(Statistics stats) { } } - public void setGroupKeyObjectInspector(ObjectInspector keyObjectInspector) { - this.groupKeyOI = keyObjectInspector; - } - - public ObjectInspector getGroupKeyObjectInspector() { - return groupKeyOI; - } - public static Operator createDummy() { return new DummyOperator(); } private static class DummyOperator extends Operator { public DummyOperator() { super("dummy"); } + @Override public void processOp(Object row, int tag) { } + @Override public OperatorType getType() { return null; } } + + public Map getTagToOperatorTree() { + if ((parentOperators == null) || (parentOperators.size() == 0)) { + return null; + } + Map dummyOps = parentOperators.get(0).getTagToOperatorTree(); + return dummyOps; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java index ed8692d..f1c3564 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.exec; +import org.apache.hadoop.hive.ql.exec.vector.VectorAppMasterEventOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorExtractOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorFileSinkOperator; import org.apache.hadoop.hive.ql.exec.vector.VectorFilterOperator; @@ -31,6 +32,7 @@ import 
org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc; import org.apache.hadoop.hive.ql.plan.CollectDesc; +import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc; import org.apache.hadoop.hive.ql.plan.DemuxDesc; import org.apache.hadoop.hive.ql.plan.DummyStoreDesc; import org.apache.hadoop.hive.ql.plan.DynamicPruningEventDesc; @@ -114,10 +116,16 @@ RCFileMergeOperator.class)); opvec.add(new OpTuple(OrcFileMergeDesc.class, OrcFileMergeOperator.class)); + opvec.add(new OpTuple(CommonMergeJoinDesc.class, + CommonMergeJoinOperator.class)); } static { vectorOpvec = new ArrayList(); + vectorOpvec.add(new OpTuple(AppMasterEventDesc.class, + VectorAppMasterEventOperator.class)); + vectorOpvec.add(new OpTuple(DynamicPruningEventDesc.class, + VectorAppMasterEventOperator.class)); vectorOpvec.add(new OpTuple(SelectDesc.class, VectorSelectOperator.class)); vectorOpvec.add(new OpTuple(GroupByDesc.class, VectorGroupByOperator.class)); vectorOpvec.add(new OpTuple(MapJoinDesc.class, VectorMapJoinOperator.class)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java index bec3cb7..2bd40fa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorUtils.java @@ -46,6 +46,9 @@ public static Set findOperators(Collection> starts, Class clazz) { Set found = new HashSet(); for (Operator start : starts) { + if (start == null) { + continue; + } findOperators(start, clazz, found); } return found; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java index 9bbc4ec..d8698da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java @@ -50,7 +50,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector; import org.apache.hadoop.io.BinaryComparable; import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapred.OutputCollector; @@ -67,6 +66,9 @@ } private static final Log LOG = LogFactory.getLog(ReduceSinkOperator.class.getName()); + private static final boolean isInfoEnabled = LOG.isInfoEnabled(); + private static final boolean isDebugEnabled = LOG.isDebugEnabled(); + private static final boolean isTraceEnabled = LOG.isTraceEnabled(); private static final long serialVersionUID = 1L; private static final MurmurHash hash = (MurmurHash) MurmurHash.getInstance(); @@ -117,6 +119,8 @@ protected transient Object[] cachedValues; protected transient List> distinctColIndices; protected transient Random random; + protected transient int bucketNumber; + /** * This two dimensional array holds key data and a corresponding Union object * which contains the tag identifying the aggregate expression for distinct columns. 
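ReduceSinkOperator gets the same logging treatment as FileSinkOperator and GroupByOperator above: the log level is sampled once into static final booleans, and every message built by string concatenation is guarded. A minimal sketch of the pattern, assuming commons-logging as the diff does:

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;

  public class GuardedLogging {
    private static final Log LOG = LogFactory.getLog(GuardedLogging.class);
    // Sampled once at class load; per-row code reads a plain boolean.
    private static final boolean isDebugEnabled = LOG.isDebugEnabled();

    void processOp(Object row, int tag) {
      if (isDebugEnabled) {
        // Concatenation happens only when debug logging is actually on.
        LOG.debug("processing tag " + tag + " row " + row);
      }
      // ... per-row work ...
    }
  }

The trade-off is that a log level changed at runtime is not picked up until the JVM restarts, which is acceptable in per-row hot paths where even the isDebugEnabled() virtual call can show up in profiles.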
@@ -144,8 +148,14 @@ protected void initializeOp(Configuration hconf) throws HiveException { try { List keys = conf.getKeyCols(); - LOG.debug("keys size is " + keys.size()); - for (ExprNodeDesc k : keys) LOG.debug("Key exprNodeDesc " + k.getExprString()); + + if (isDebugEnabled) { + LOG.debug("keys size is " + keys.size()); + for (ExprNodeDesc k : keys) { + LOG.debug("Key exprNodeDesc " + k.getExprString()); + } + } + keyEval = new ExprNodeEvaluator[keys.size()]; int i = 0; for (ExprNodeDesc e : keys) { @@ -184,7 +194,9 @@ protected void initializeOp(Configuration hconf) throws HiveException { tag = conf.getTag(); tagByte[0] = (byte) tag; skipTag = conf.getSkipTag(); - LOG.info("Using tag = " + tag); + if (isInfoEnabled) { + LOG.info("Using tag = " + tag); + } TableDesc keyTableDesc = conf.getKeySerializeInfo(); keySerializer = (Serializer) keyTableDesc.getDeserializerClass() @@ -284,7 +296,10 @@ public void processOp(Object row, int tag) throws HiveException { bucketInspector = (IntObjectInspector)bucketField.getFieldObjectInspector(); } - LOG.info("keys are " + conf.getOutputKeyColumnNames() + " num distributions: " + conf.getNumDistributionKeys()); + if (isInfoEnabled) { + LOG.info("keys are " + conf.getOutputKeyColumnNames() + " num distributions: " + + conf.getNumDistributionKeys()); + } keyObjectInspector = initEvaluatorsAndReturnStruct(keyEval, distinctColIndices, conf.getOutputKeyColumnNames(), numDistributionKeys, rowInspector); @@ -304,15 +319,14 @@ public void processOp(Object row, int tag) throws HiveException { populateCachedDistributionKeys(row, 0); // replace bucketing columns with hashcode % numBuckets - int buckNum = -1; if (bucketEval != null) { - buckNum = computeBucketNumber(row, conf.getNumBuckets()); - cachedKeys[0][buckColIdxInKey] = new IntWritable(buckNum); + bucketNumber = computeBucketNumber(row, conf.getNumBuckets()); + cachedKeys[0][buckColIdxInKey] = new Text(String.valueOf(bucketNumber)); } else if (conf.getWriteType() == AcidUtils.Operation.UPDATE || conf.getWriteType() == AcidUtils.Operation.DELETE) { // In the non-partitioned case we still want to compute the bucket number for updates and // deletes. - buckNum = computeBucketNumber(row, conf.getNumBuckets()); + bucketNumber = computeBucketNumber(row, conf.getNumBuckets()); } HiveKey firstKey = toHiveKey(cachedKeys[0], tag, null); @@ -328,7 +342,7 @@ public void processOp(Object row, int tag) throws HiveException { if (autoParallel && partitionEval.length > 0) { hashCode = computeMurmurHash(firstKey); } else { - hashCode = computeHashCode(row, buckNum); + hashCode = computeHashCode(row); } firstKey.setHashCode(hashCode); @@ -377,7 +391,9 @@ private int computeBucketNumber(Object row, int numBuckets) throws HiveException // column directly. 
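For ACID updates and deletes the bucket number is read straight out of the record identifier, as the lines below show; for ordinary bucketed writes it is computed by hashing the bucket columns, and computeHashCode later folds it into the key hash. A hedged sketch of that arithmetic, using plain hashCode() where Hive really goes through ObjectInspectorUtils.hashCode (the modulo step is an assumption about the elided computeBucketNumber body):

  // Illustrative bucket assignment and hash combination; the 31 multiplier
  // and the "negative means unbucketed" convention mirror the diff.
  final class BucketMath {
    static int bucketNumber(Object[] bucketCols, int numBuckets) {
      int hash = 0;
      for (Object col : bucketCols) {
        hash = hash * 31 + (col == null ? 0 : col.hashCode());
      }
      return (hash & Integer.MAX_VALUE) % numBuckets; // non-negative bucket id
    }

    static int combine(int keyHashCode, int bucketNumber) {
      return bucketNumber < 0 ? keyHashCode : keyHashCode * 31 + bucketNumber;
    }
  }

Keeping bucketNumber in a field, rather than threading buckNum through computeHashCode, is what lets makeValueWritable append the same bucket number as the last value column further down.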
Object recIdValue = acidRowInspector.getStructFieldData(row, recIdField); buckNum = bucketInspector.get(recIdInspector.getStructFieldData(recIdValue, bucketField)); - LOG.debug("Acid choosing bucket number " + buckNum); + if (isTraceEnabled) { + LOG.trace("Acid choosing bucket number " + buckNum); + } } else { for (int i = 0; i < bucketEval.length; i++) { Object o = bucketEval[i].evaluate(row); @@ -422,7 +438,7 @@ protected final int computeMurmurHash(HiveKey firstKey) { return hash.hash(firstKey.getBytes(), firstKey.getDistKeyLength(), 0); } - private int computeHashCode(Object row, int buckNum) throws HiveException { + private int computeHashCode(Object row) throws HiveException { // Evaluate the HashCode int keyHashCode = 0; if (partitionEval.length == 0) { @@ -446,8 +462,10 @@ private int computeHashCode(Object row, int buckNum) throws HiveException { + ObjectInspectorUtils.hashCode(o, partitionObjectInspectors[i]); } } - LOG.debug("Going to return hash code " + (keyHashCode * 31 + buckNum)); - return buckNum < 0 ? keyHashCode : keyHashCode * 31 + buckNum; + if (isTraceEnabled) { + LOG.trace("Going to return hash code " + (keyHashCode * 31 + bucketNumber)); + } + return bucketNumber < 0 ? keyHashCode : keyHashCode * 31 + bucketNumber; } private boolean partitionKeysAreNull(Object row) throws HiveException { @@ -493,10 +511,19 @@ protected void collect(BytesWritable keyWritable, Writable valueWritable) throws } private BytesWritable makeValueWritable(Object row) throws Exception { + int length = valueEval.length; + + // in case of bucketed table, insert the bucket number as the last column in value + if (bucketEval != null) { + length -= 1; + cachedValues[length] = new Text(String.valueOf(bucketNumber)); + } + // Evaluate the value - for (int i = 0; i < valueEval.length; i++) { + for (int i = 0; i < length; i++) { cachedValues[i] = valueEval[i].evaluate(row); } + // Serialize the value return (BytesWritable) valueSerializer.serialize(cachedValues, valueObjectInspector); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java new file mode 100644 index 0000000..6a2d268 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TezDummyStoreOperator.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec; + +import org.apache.hadoop.hive.ql.metadata.HiveException; + +/** + * A dummy store operator, the same as DummyStoreOperator but for Tez. This is required so that we + * don't have to check for Tez every time before forwarding a record. In Tez, records flow down from the dummy + * store operator in the processOp phase, unlike in MapReduce.
+ * + */ +public class TezDummyStoreOperator extends DummyStoreOperator { + + /** + * Unlike the MR counterpart, on Tez we want processOp to forward + the records. + */ + @Override + public void processOp(Object row, int tag) throws HiveException { + super.processOp(row, tag); + forward(result.o, outputObjInspector); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index db64193..7d9feac 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -89,6 +89,7 @@ import org.apache.hadoop.hive.ql.plan.GroupByDesc; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.MergeJoinWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; @@ -201,6 +202,8 @@ public static String HADOOP_LOCAL_FS = "file:///"; public static String MAP_PLAN_NAME = "map.xml"; public static String REDUCE_PLAN_NAME = "reduce.xml"; + public static String MERGE_PLAN_NAME = "merge.xml"; + public static final String INPUT_NAME = "iocontext.input.name"; public static final String MAPRED_MAPPER_CLASS = "mapred.mapper.class"; public static final String MAPRED_REDUCER_CLASS = "mapred.reducer.class"; @@ -291,6 +294,39 @@ public static ReduceWork getReduceWork(Configuration conf) { return (ReduceWork) getBaseWork(conf, REDUCE_PLAN_NAME); } + public static Path setMergeWork(JobConf conf, MergeJoinWork mergeJoinWork, Path mrScratchDir, + boolean useCache) { + for (BaseWork baseWork : mergeJoinWork.getBaseWorkList()) { + setBaseWork(conf, baseWork, mrScratchDir, baseWork.getName() + MERGE_PLAN_NAME, useCache); + String prefixes = conf.get(DagUtils.TEZ_MERGE_WORK_FILE_PREFIXES); + if (prefixes == null) { + prefixes = baseWork.getName(); + } else { + prefixes = prefixes + "," + baseWork.getName(); + } + conf.set(DagUtils.TEZ_MERGE_WORK_FILE_PREFIXES, prefixes); + } + + // nothing to return + return null; + } + + public static BaseWork getMergeWork(JobConf jconf) { + if ((jconf.get(DagUtils.TEZ_MERGE_CURRENT_MERGE_FILE_PREFIX) == null) + || (jconf.get(DagUtils.TEZ_MERGE_CURRENT_MERGE_FILE_PREFIX).isEmpty())) { + return null; + } + return getMergeWork(jconf, jconf.get(DagUtils.TEZ_MERGE_CURRENT_MERGE_FILE_PREFIX)); + } + + public static BaseWork getMergeWork(JobConf jconf, String prefix) { + if (prefix == null || prefix.isEmpty()) { + return null; + } + + return getBaseWork(jconf, prefix + MERGE_PLAN_NAME); + } + public static void cacheBaseWork(Configuration conf, String name, BaseWork work, Path hiveScratchDir) { try { @@ -375,6 +411,8 @@ private static BaseWork getBaseWork(Configuration conf, String name) { throw new RuntimeException("unable to determine work from configuration ."
+ MAPRED_REDUCER_CLASS +" was "+ conf.get(MAPRED_REDUCER_CLASS)) ; } + } else if (name.contains(MERGE_PLAN_NAME)) { + gWork = deserializePlan(in, MapWork.class, conf); } gWorkMap.put(path, gWork); } else { @@ -608,8 +646,14 @@ protected Expression instantiate(Object oldInstance, Encoder out) { } public static void setMapRedWork(Configuration conf, MapredWork w, Path hiveScratchDir) { + String useName = conf.get(INPUT_NAME); + if (useName == null) { + useName = "mapreduce"; + } + conf.set(INPUT_NAME, useName); setMapWork(conf, w.getMapWork(), hiveScratchDir, true); if (w.getReduceWork() != null) { + conf.set(INPUT_NAME, useName); setReduceWork(conf, w.getReduceWork(), hiveScratchDir, true); } } @@ -1846,7 +1890,7 @@ public static void removeTempOrDuplicateFiles(FileSystem fs, Path path) throws I for (int i = 0; i < parts.length; ++i) { assert parts[i].isDir() : "dynamic partition " + parts[i].getPath() - + " is not a direcgtory"; + + " is not a directory"; FileStatus[] items = fs.listStatus(parts[i].getPath()); // remove empty directory since DP insert should not generate empty partitions. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java index 7fb4c46..f188e69 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java @@ -78,10 +78,11 @@ private MapredLocalWork localWork = null; private boolean isLogInfoEnabled = false; - private final ExecMapperContext execContext = new ExecMapperContext(); + private ExecMapperContext execContext = null; @Override public void configure(JobConf job) { + execContext = new ExecMapperContext(job); // Allocate the bean at the beginning - memoryMXBean = ManagementFactory.getMemoryMXBean(); l4j.info("maximum memory = " + memoryMXBean.getHeapMemoryUsage().getMax()); @@ -292,6 +293,7 @@ public ReportStats(Reporter rp) { this.rp = rp; } + @Override public void func(Operator op) { Map, Long> opStats = op.getStats(); for (Map.Entry, Long> e : opStats.entrySet()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java index 74bc2d2..13d0650 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java @@ -22,6 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.FetchOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.IOContext; import org.apache.hadoop.hive.ql.plan.MapredLocalWork; import org.apache.hadoop.mapred.JobConf; @@ -60,8 +61,9 @@ public void setCurrentBigBucketFile(String currentBigBucketFile) { this.currentBigBucketFile = currentBigBucketFile; } - public ExecMapperContext() { - ioCxt = IOContext.get(); + public ExecMapperContext(JobConf jc) { + this.jc = jc; + ioCxt = IOContext.get(jc); } public void clear() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java index c9e469c..9cd8b56 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecReducer.java @@ -66,6 +66,8 @@ public class ExecReducer extends MapReduceBase implements Reducer { private static final Log LOG = LogFactory.getLog("ExecReducer"); + private static final 
boolean isInfoEnabled = LOG.isInfoEnabled(); + private static final boolean isTraceEnabled = LOG.isTraceEnabled(); private static final String PLAN_KEY = "__REDUCE_PLAN__"; // used to log memory usage periodically @@ -75,7 +77,6 @@ private final Deserializer[] inputValueDeserializer = new Deserializer[Byte.MAX_VALUE]; private final Object[] valueObject = new Object[Byte.MAX_VALUE]; private final List row = new ArrayList(Utilities.reduceFieldNameList.size()); - private final boolean isLogInfoEnabled = LOG.isInfoEnabled(); // TODO: move to DynamicSerDe when it's ready private Deserializer inputKeyDeserializer; @@ -101,16 +102,18 @@ public void configure(JobConf job) { ObjectInspector[] valueObjectInspector = new ObjectInspector[Byte.MAX_VALUE]; ObjectInspector keyObjectInspector; - LOG.info("maximum memory = " + memoryMXBean.getHeapMemoryUsage().getMax()); + if (isInfoEnabled) { + LOG.info("maximum memory = " + memoryMXBean.getHeapMemoryUsage().getMax()); - try { - LOG.info("conf classpath = " - + Arrays.asList(((URLClassLoader) job.getClassLoader()).getURLs())); - LOG.info("thread classpath = " - + Arrays.asList(((URLClassLoader) Thread.currentThread() - .getContextClassLoader()).getURLs())); - } catch (Exception e) { - LOG.info("cannot get classpath: " + e.getMessage()); + try { + LOG.info("conf classpath = " + + Arrays.asList(((URLClassLoader) job.getClassLoader()).getURLs())); + LOG.info("thread classpath = " + + Arrays.asList(((URLClassLoader) Thread.currentThread() + .getContextClassLoader()).getURLs())); + } catch (Exception e) { + LOG.info("cannot get classpath: " + e.getMessage()); + } } jc = job; @@ -147,7 +150,6 @@ public void configure(JobConf job) { ArrayList ois = new ArrayList(); ois.add(keyObjectInspector); ois.add(valueObjectInspector[tag]); - reducer.setGroupKeyObjectInspector(keyObjectInspector); rowObjectInspector[tag] = ObjectInspectorFactory .getStandardStructObjectInspector(Utilities.reduceFieldNameList, ois); } @@ -202,7 +204,9 @@ public void reduce(Object key, Iterator values, OutputCollector output, groupKey = new BytesWritable(); } else { // If a operator wants to do some work at the end of a group - LOG.trace("End Group"); + if (isTraceEnabled) { + LOG.trace("End Group"); + } reducer.endGroup(); } @@ -217,9 +221,11 @@ public void reduce(Object key, Iterator values, OutputCollector output, } groupKey.set(keyWritable.get(), 0, keyWritable.getSize()); - LOG.trace("Start Group"); - reducer.setGroupKeyObject(keyObject); + if (isTraceEnabled) { + LOG.trace("Start Group"); + } reducer.startGroup(); + reducer.setGroupKeyObject(keyObject); } // System.err.print(keyObject.toString()); while (values.hasNext()) { @@ -239,12 +245,14 @@ public void reduce(Object key, Iterator values, OutputCollector output, row.clear(); row.add(keyObject); row.add(valueObject[tag]); - if (isLogInfoEnabled) { + if (isInfoEnabled) { cntr++; if (cntr == nextCntr) { long used_memory = memoryMXBean.getHeapMemoryUsage().getUsed(); - LOG.info("ExecReducer: processing " + cntr - + " rows: used memory = " + used_memory); + if (isInfoEnabled) { + LOG.info("ExecReducer: processing " + cntr + + " rows: used memory = " + used_memory); + } nextCntr = getNextCntr(cntr); } } @@ -290,17 +298,19 @@ private long getNextCntr(long cntr) { public void close() { // No row was processed - if (oc == null) { + if (oc == null && isTraceEnabled) { LOG.trace("Close called without any rows processed"); } try { if (groupKey != null) { // If a operator wants to do some work at the end of a group - LOG.trace("End Group"); + if 
(isTraceEnabled) { + LOG.trace("End Group"); + } reducer.endGroup(); } - if (isLogInfoEnabled) { + if (isInfoEnabled) { LOG.info("ExecReducer: processed " + cntr + " rows: used memory = " + memoryMXBean.getHeapMemoryUsage().getUsed()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java index 4adfc6c..79da5a0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java @@ -91,7 +91,7 @@ // not sure we need this exec context; but all the operators in the work // will pass this context throught - private ExecMapperContext execContext = new ExecMapperContext(); + private ExecMapperContext execContext = null; private Process executor; @@ -113,6 +113,7 @@ public void setExecContext(ExecMapperContext execContext) { public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext driverContext) { super.initialize(conf, queryPlan, driverContext); job = new JobConf(conf, ExecDriver.class); + execContext = new ExecMapperContext(job); //we don't use the HadoopJobExecHooks for local tasks this.jobExecHelper = new HadoopJobExecHelper(job, console, this, null); } @@ -301,6 +302,11 @@ public int executeInProcess(DriverContext driverContext) { if (work == null) { return -1; } + + if (execContext == null) { + execContext = new ExecMapperContext(job); + } + memoryMXBean = ManagementFactory.getMemoryMXBean(); long startTime = System.currentTimeMillis(); console.printInfo(Utilities.now() diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java index e1bd8fc..20ea977 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkMapRecordHandler.java @@ -63,8 +63,7 @@ private MapredLocalWork localWork = null; private boolean isLogInfoEnabled = false; - - private final ExecMapperContext execContext = new ExecMapperContext(); + private ExecMapperContext execContext; public void init(JobConf job, OutputCollector output, Reporter reporter) { super.init(job, output, reporter); @@ -74,7 +73,7 @@ public void init(JobConf job, OutputCollector output, Reporter reporter) { try { jc = job; - execContext.setJc(jc); + execContext = new ExecMapperContext(jc); // create map and fetch operators MapWork mrwork = (MapWork) cache.retrieve(PLAN_KEY); if (mrwork == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java index dff3517..3e6ca56 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/SparkReduceRecordHandler.java @@ -186,7 +186,7 @@ public void init(JobConf job, OutputCollector output, Reporter reporter) { } else { ois.add(keyObjectInspector); ois.add(valueObjectInspector[tag]); - reducer.setGroupKeyObjectInspector(keyObjectInspector); + //reducer.setGroupKeyObjectInspector(keyObjectInspector); rowObjectInspector[tag] = ObjectInspectorFactory.getStandardStructObjectInspector( Utilities.reduceFieldNameList, ois); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java index f2acd75..848be26 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomPartitionVertex.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.ql.plan.TezWork.VertexType; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.serializer.SerializationFactory; @@ -79,9 +80,14 @@ private List dataInformationEvents; private int numBuckets = -1; private Configuration conf = null; - private boolean rootVertexInitialized = false; private final SplitGrouper grouper = new SplitGrouper(); private int taskCount = 0; + private VertexType vertexType; + private String mainWorkName; + private final Multimap bucketToTaskMap = HashMultimap. create(); + + private final Map> inputToGroupedSplitMap = + new HashMap>(); public CustomPartitionVertex(VertexManagerPluginContext context) { super(context); @@ -90,8 +96,18 @@ public CustomPartitionVertex(VertexManagerPluginContext context) { @Override public void initialize() { this.context = getContext(); - ByteBuffer byteBuf = context.getUserPayload().getPayload(); - this.numBuckets = byteBuf.getInt(); + ByteBuffer payload = context.getUserPayload().getPayload(); + CustomVertexConfiguration vertexConf = new CustomVertexConfiguration(); + DataInputByteBuffer dibb = new DataInputByteBuffer(); + dibb.reset(payload); + try { + vertexConf.readFields(dibb); + } catch (IOException e) { + throw new RuntimeException(e); + } + this.numBuckets = vertexConf.getNumBuckets(); + this.mainWorkName = vertexConf.getInputName(); + this.vertexType = vertexConf.getVertexType(); } @Override @@ -113,17 +129,12 @@ public void onSourceTaskCompleted(String srcVertexName, Integer attemptId) { public void onVertexManagerEventReceived(VertexManagerEvent vmEvent) { } - // One call per root Input - and for now only one is handled. + // One call per root Input @Override public void onRootVertexInitialized(String inputName, InputDescriptor inputDescriptor, List events) { + LOG.info("On root vertex initialized " + inputName); - // Ideally, since there's only 1 Input expected at the moment - - // ensure this method is called only once. Tez will call it once per Root - // Input. - Preconditions.checkState(rootVertexInitialized == false); - LOG.info("Root vertex not initialized"); - rootVertexInitialized = true; try { // This is using the payload from the RootVertexInitializer corresponding // to InputName. Ideally it should be using it's own configuration class - @@ -164,9 +175,6 @@ public void onRootVertexInitialized(String inputName, InputDescriptor inputDescr // No tasks should have been started yet. Checked by initial state // check. 
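initialize() above now recovers a CustomVertexConfiguration (the class is introduced later in this patch) from the vertex manager's user payload instead of a bare int. As a self-contained sketch of the same round-trip, using plain java.io streams in place of Hadoop's DataOutputBuffer/DataInputByteBuffer; the reads must mirror the writes in exactly the same field order:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

public class PayloadRoundTrip {
  enum VertexType { AUTO_INITIALIZED_EDGES, INITIALIZED_EDGES, MULTI_INPUT_INITIALIZED_EDGES }

  // Same field order as CustomVertexConfiguration.write(): type, buckets, name.
  static ByteBuffer serialize(int numBuckets, VertexType type, String inputName)
      throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeInt(type.ordinal());
    out.writeInt(numBuckets);
    out.writeUTF(inputName);
    out.flush();
    return ByteBuffer.wrap(bytes.toByteArray());
  }

  // Mirrors readFields(): the reads line up with the writes above.
  static String deserialize(ByteBuffer payload) throws IOException {
    byte[] raw = new byte[payload.remaining()];
    payload.get(raw);
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(raw));
    VertexType type = VertexType.values()[in.readInt()];
    int numBuckets = in.readInt();
    String inputName = in.readUTF();
    return type + ", " + numBuckets + " buckets, input=" + inputName;
  }

  public static void main(String[] args) throws IOException {
    System.out.println(deserialize(serialize(4, VertexType.INITIALIZED_EDGES, "Map 1")));
  }
}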
Preconditions.checkState(dataInformationEventSeen == false); - Preconditions - .checkState(context.getVertexNumTasks(context.getVertexName()) == -1, - "Parallelism for the vertex should be set to -1 if the InputInitializer is setting parallelism"); InputConfigureVertexTasksEvent cEvent = (InputConfigureVertexTasksEvent) event; // The vertex cannot be configured until all DataEvents are seen - to @@ -220,21 +228,55 @@ public void onRootVertexInitialized(String inputName, InputDescriptor inputDescr (bucketToInitialSplitMap.get(key).toArray(new InputSplit[0])); Multimap groupedSplit = HiveSplitGenerator.generateGroupedSplits(jobConf, conf, inputSplitArray, waves, - availableSlots); + availableSlots, inputName); bucketToGroupedSplitMap.putAll(key, groupedSplit.values()); } - LOG.info("We have grouped the splits into " + bucketToGroupedSplitMap.size() + " tasks"); - processAllEvents(inputName, bucketToGroupedSplitMap); + LOG.info("We have grouped the splits into " + bucketToGroupedSplitMap); + if ((mainWorkName.isEmpty() == false) && (mainWorkName.compareTo(inputName) != 0)) { + /* + * this is the small table side. In case of SMB join, we may need to send each split to the + * corresponding bucket-based task on the other side. In case a split needs to go to + * multiple downstream tasks, we need to clone the event and send it to the right + * destination. + */ + processAllSideEvents(inputName, bucketToGroupedSplitMap); + } else { + processAllEvents(inputName, bucketToGroupedSplitMap); + } } catch (Exception e) { throw new RuntimeException(e); } } + private void processAllSideEvents(String inputName, + Multimap bucketToGroupedSplitMap) throws IOException { + // the bucket to task map should have been setup by the big table. + if (bucketToTaskMap.isEmpty()) { + inputToGroupedSplitMap.put(inputName, bucketToGroupedSplitMap); + return; + } + List taskEvents = new ArrayList(); + for (Entry> entry : bucketToGroupedSplitMap.asMap().entrySet()) { + Collection destTasks = bucketToTaskMap.get(entry.getKey()); + for (Integer task : destTasks) { + for (InputSplit split : entry.getValue()) { + MRSplitProto serializedSplit = MRInputHelpers.createSplitProto(split); + InputDataInformationEvent diEvent = + InputDataInformationEvent.createWithSerializedPayload(task, serializedSplit + .toByteString().asReadOnlyByteBuffer()); + diEvent.setTargetIndex(task); + taskEvents.add(diEvent); + } + } + } + + context.addRootInputEvents(inputName, taskEvents); + } + private void processAllEvents(String inputName, Multimap bucketToGroupedSplitMap) throws IOException { - Multimap bucketToTaskMap = HashMultimap. create(); List finalSplits = Lists.newLinkedList(); for (Entry> entry : bucketToGroupedSplitMap.asMap().entrySet()) { int bucketNum = entry.getKey(); @@ -248,11 +290,13 @@ private void processAllEvents(String inputName, // Construct the EdgeManager descriptor to be used by all edges which need // the routing table. 
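processAllSideEvents above fans each small-table split out to every downstream task that owns the matching bucket, cloning the serialized event when more than one task qualifies. A reduced model of that routing with plain collections (all names here are illustrative stand-ins, not Hive or Tez types):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

public class SideEventRouting {
  // The (task, split) pairs are a per-bucket cross product: every grouped
  // split of a bucket must reach every task that reads that bucket.
  static List<String> route(Map<Integer, List<String>> bucketToSplits,
      Map<Integer, List<Integer>> bucketToTasks) {
    List<String> events = new ArrayList<>();
    for (Map.Entry<Integer, List<String>> e : bucketToSplits.entrySet()) {
      for (int task : bucketToTasks.getOrDefault(e.getKey(), Collections.emptyList())) {
        for (String split : e.getValue()) {
          // Stands in for a cloned InputDataInformationEvent targeted at 'task'.
          events.add(split + " -> task " + task);
        }
      }
    }
    return events;
  }

  public static void main(String[] args) {
    Map<Integer, List<String>> splits = Map.of(0, List.of("s0", "s1"), 1, List.of("s2"));
    Map<Integer, List<Integer>> tasks = Map.of(0, List.of(0, 2), 1, List.of(1));
    route(splits, tasks).forEach(System.out::println);
  }
}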
- EdgeManagerPluginDescriptor hiveEdgeManagerDesc = - EdgeManagerPluginDescriptor.create(CustomPartitionEdge.class.getName()); - UserPayload payload = getBytePayload(bucketToTaskMap); - hiveEdgeManagerDesc.setUserPayload(payload); - + EdgeManagerPluginDescriptor hiveEdgeManagerDesc = null; + if ((vertexType == VertexType.MULTI_INPUT_INITIALIZED_EDGES) + || (vertexType == VertexType.INITIALIZED_EDGES)) { + hiveEdgeManagerDesc = EdgeManagerPluginDescriptor.create(CustomPartitionEdge.class.getName()); + UserPayload payload = getBytePayload(bucketToTaskMap); + hiveEdgeManagerDesc.setUserPayload(payload); + } Map emMap = Maps.newHashMap(); // Replace the edge manager for all vertices which have routing type custom. @@ -285,13 +329,21 @@ private void processAllEvents(String inputName, rootInputSpecUpdate.put( inputName, InputSpecUpdate.getDefaultSinglePhysicalInputSpecUpdate()); - context.setVertexParallelism( - taskCount, - VertexLocationHint.create(grouper.createTaskLocationHints(finalSplits - .toArray(new InputSplit[finalSplits.size()]))), emMap, rootInputSpecUpdate); + if ((mainWorkName.compareTo(inputName) == 0) || (mainWorkName.isEmpty())) { + context.setVertexParallelism( + taskCount, + VertexLocationHint.create(grouper.createTaskLocationHints(finalSplits + .toArray(new InputSplit[finalSplits.size()]))), emMap, rootInputSpecUpdate); + } // Set the actual events for the tasks. context.addRootInputEvents(inputName, taskEvents); + if (inputToGroupedSplitMap.isEmpty() == false) { + for (Entry> entry : inputToGroupedSplitMap.entrySet()) { + processAllSideEvents(entry.getKey(), entry.getValue()); + } + inputToGroupedSplitMap.clear(); + } } UserPayload getBytePayload(Multimap routingTable) throws IOException { @@ -315,7 +367,8 @@ private FileSplit getFileSplitFromEvent(InputDataInformationEvent event) throws if (!(inputSplit instanceof FileSplit)) { throw new UnsupportedOperationException( - "Cannot handle splits other than FileSplit for the moment"); + "Cannot handle splits other than FileSplit for the moment. Current input split type: " + + inputSplit.getClass().getSimpleName()); } return (FileSplit) inputSplit; } @@ -327,7 +380,6 @@ private FileSplit getFileSplitFromEvent(InputDataInformationEvent event) throws Map> pathFileSplitsMap) { int bucketNum = 0; - int fsCount = 0; Multimap bucketToInitialSplitMap = ArrayListMultimap. create(); @@ -335,14 +387,20 @@ private FileSplit getFileSplitFromEvent(InputDataInformationEvent event) throws for (Map.Entry> entry : pathFileSplitsMap.entrySet()) { int bucketId = bucketNum % numBuckets; for (FileSplit fsplit : entry.getValue()) { - fsCount++; bucketToInitialSplitMap.put(bucketId, fsplit); } bucketNum++; } - LOG.info("Total number of splits counted: " + fsCount + " and total files encountered: " - + pathFileSplitsMap.size()); + if (bucketNum < numBuckets) { + int loopedBucketId = 0; + for (; bucketNum < numBuckets; bucketNum++) { + for (InputSplit fsplit : bucketToInitialSplitMap.get(loopedBucketId)) { + bucketToInitialSplitMap.put(bucketNum, fsplit); + } + loopedBucketId++; + } + } return bucketToInitialSplitMap; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomVertexConfiguration.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomVertexConfiguration.java new file mode 100644 index 0000000..4829f92 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/CustomVertexConfiguration.java @@ -0,0 +1,74 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.tez; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.hadoop.hive.ql.plan.TezWork.VertexType; +import org.apache.hadoop.io.Writable; + +/* + * This class is the payload for custom vertex. It serializes and de-serializes + * @numBuckets: the number of buckets of the "big table" + * @vertexType: this is the type of vertex and differentiates between bucket map join and SMB joins + * @inputName: This is the name of the input. Used in case of SMB joins + */ +public class CustomVertexConfiguration implements Writable { + + private int numBuckets; + private VertexType vertexType = VertexType.AUTO_INITIALIZED_EDGES; + private String inputName; + + public CustomVertexConfiguration() { + } + + public CustomVertexConfiguration(int numBuckets, VertexType vertexType, String inputName) { + this.numBuckets = numBuckets; + this.vertexType = vertexType; + this.inputName = inputName; + } + + @Override + public void write(DataOutput out) throws IOException { + out.writeInt(this.vertexType.ordinal()); + out.writeInt(this.numBuckets); + out.writeUTF(inputName); + } + + @Override + public void readFields(DataInput in) throws IOException { + this.vertexType = VertexType.values()[in.readInt()]; + this.numBuckets = in.readInt(); + this.inputName = in.readUTF(); + } + + public int getNumBuckets() { + return numBuckets; + } + + public VertexType getVertexType() { + return vertexType; + } + + public String getInputName() { + return inputName; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java index ac4b5a1..0a0418e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java @@ -20,6 +20,23 @@ import com.google.common.base.Function; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; +import com.google.protobuf.ByteString; + +import javax.security.auth.login.LoginException; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + import org.apache.commons.io.FilenameUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; @@ -32,6 +49,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator; import 
org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.mr.ExecMapper; import org.apache.hadoop.hive.ql.exec.mr.ExecReducer; @@ -47,10 +65,12 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.MapWork; +import org.apache.hadoop.hive.ql.plan.MergeJoinWork; import org.apache.hadoop.hive.ql.plan.ReduceWork; import org.apache.hadoop.hive.ql.plan.TezEdgeProperty; import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType; import org.apache.hadoop.hive.ql.plan.TezWork; +import org.apache.hadoop.hive.ql.plan.TezWork.VertexType; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsFactory; import org.apache.hadoop.hive.ql.stats.StatsPublisher; @@ -90,12 +110,16 @@ import org.apache.tez.dag.api.VertexGroup; import org.apache.tez.dag.api.VertexManagerPluginDescriptor; import org.apache.tez.dag.library.vertexmanager.ShuffleVertexManager; +import org.apache.tez.mapreduce.common.MRInputAMSplitGenerator; import org.apache.tez.mapreduce.hadoop.MRHelpers; import org.apache.tez.mapreduce.hadoop.MRInputHelpers; import org.apache.tez.mapreduce.hadoop.MRJobConfig; +import org.apache.tez.mapreduce.input.MRInput; import org.apache.tez.mapreduce.input.MRInputLegacy; +import org.apache.tez.mapreduce.input.MultiMRInput; import org.apache.tez.mapreduce.output.MROutput; import org.apache.tez.mapreduce.partition.MRPartitioner; +import org.apache.tez.mapreduce.protos.MRRuntimeProtos; import org.apache.tez.runtime.library.api.TezRuntimeConfiguration; import org.apache.tez.runtime.library.common.comparator.TezBytesComparator; import org.apache.tez.runtime.library.common.serializer.TezBytesWritableSerialization; @@ -104,21 +128,6 @@ import org.apache.tez.runtime.library.conf.UnorderedPartitionedKVEdgeConfig; import org.apache.tez.runtime.library.input.ConcatenatedMergedKeyValueInput; -import javax.security.auth.login.LoginException; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.TimeUnit; - /** * DagUtils. DagUtils is a collection of helper methods to convert * map and reduce work to tez vertices and edges. It handles configuration @@ -130,6 +139,11 @@ private static final Log LOG = LogFactory.getLog(DagUtils.class.getName()); private static final String TEZ_DIR = "_tez_scratch_dir"; private static DagUtils instance; + // The merge file being currently processed. + public static final String TEZ_MERGE_CURRENT_MERGE_FILE_PREFIX = + "hive.tez.current.merge.file.prefix"; + // "A comma separated list of work names used as prefix. + public static final String TEZ_MERGE_WORK_FILE_PREFIXES = "hive.tez.merge.file.prefixes"; private void addCredentials(MapWork mapWork, DAG dag) { Set paths = mapWork.getPathToAliases().keySet(); @@ -238,8 +252,8 @@ private JobConf initializeVertexConf(JobConf baseConf, Context context, MapWork * endpoints. 
*/ @SuppressWarnings("rawtypes") - public GroupInputEdge createEdge(VertexGroup group, JobConf vConf, - Vertex w, TezEdgeProperty edgeProp) + public GroupInputEdge createEdge(VertexGroup group, JobConf vConf, Vertex w, + TezEdgeProperty edgeProp, VertexType vertexType) throws IOException { Class mergeInputClass; @@ -254,10 +268,14 @@ public GroupInputEdge createEdge(VertexGroup group, JobConf vConf, case CUSTOM_EDGE: { mergeInputClass = ConcatenatedMergedKeyValueInput.class; int numBuckets = edgeProp.getNumBuckets(); + CustomVertexConfiguration vertexConf = + new CustomVertexConfiguration(numBuckets, vertexType, ""); + DataOutputBuffer dob = new DataOutputBuffer(); + vertexConf.write(dob); VertexManagerPluginDescriptor desc = VertexManagerPluginDescriptor.create(CustomPartitionVertex.class.getName()); - ByteBuffer userPayload = ByteBuffer.allocate(4).putInt(numBuckets); - userPayload.flip(); + byte[] userPayloadBytes = dob.getData(); + ByteBuffer userPayload = ByteBuffer.wrap(userPayloadBytes); desc.setUserPayload(UserPayload.create(userPayload)); w.setVertexManagerPlugin(desc); break; @@ -289,17 +307,21 @@ public GroupInputEdge createEdge(VertexGroup group, JobConf vConf, * @param w The second vertex (sink) * @return */ - public Edge createEdge(JobConf vConf, Vertex v, Vertex w, - TezEdgeProperty edgeProp) + public Edge createEdge(JobConf vConf, Vertex v, Vertex w, TezEdgeProperty edgeProp, + VertexType vertexType) throws IOException { switch(edgeProp.getEdgeType()) { case CUSTOM_EDGE: { int numBuckets = edgeProp.getNumBuckets(); - ByteBuffer userPayload = ByteBuffer.allocate(4).putInt(numBuckets); - userPayload.flip(); + CustomVertexConfiguration vertexConf = + new CustomVertexConfiguration(numBuckets, vertexType, ""); + DataOutputBuffer dob = new DataOutputBuffer(); + vertexConf.write(dob); VertexManagerPluginDescriptor desc = VertexManagerPluginDescriptor.create( CustomPartitionVertex.class.getName()); + byte[] userPayloadBytes = dob.getData(); + ByteBuffer userPayload = ByteBuffer.wrap(userPayloadBytes); desc.setUserPayload(UserPayload.create(userPayload)); w.setVertexManagerPlugin(desc); break; @@ -405,7 +427,7 @@ private EdgeProperty createEdgeProperty(TezEdgeProperty edgeProp, Configuration * from yarn. Falls back to Map-reduce's map size if tez * container size isn't set. */ - private Resource getContainerResource(Configuration conf) { + public static Resource getContainerResource(Configuration conf) { int memory = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) > 0 ? HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) : conf.getInt(MRJobConfig.MAP_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB); @@ -443,12 +465,61 @@ private String getContainerJavaOpts(Configuration conf) { return MRHelpers.getJavaOptsForMRMapper(conf); } + private Vertex createVertex(JobConf conf, MergeJoinWork mergeJoinWork, LocalResource appJarLr, + List additionalLr, FileSystem fs, Path mrScratchDir, Context ctx, + VertexType vertexType) + throws Exception { + Utilities.setMergeWork(conf, mergeJoinWork, mrScratchDir, false); + if (mergeJoinWork.getMainWork() instanceof MapWork) { + List mapWorkList = mergeJoinWork.getBaseWorkList(); + MapWork mapWork = (MapWork) (mergeJoinWork.getMainWork()); + CommonMergeJoinOperator mergeJoinOp = mergeJoinWork.getMergeJoinOperator(); + Vertex mergeVx = + createVertex(conf, mapWork, appJarLr, additionalLr, fs, mrScratchDir, ctx, vertexType); + + // grouping happens in execution phase. 
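One detail of the payload construction shared by both createEdge variants above: DataOutputBuffer.getData() returns the whole backing array, which may be longer than the bytes actually written. The patch wraps the full array, which works because readFields() only consumes the leading bytes; a sketch of the tighter alternative, assuming Hadoop on the classpath:

import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

public final class PayloadBytes {
  // getData() exposes the internal, possibly over-allocated array;
  // getLength() is the count of valid bytes. Bounding the wrap keeps
  // trailing slack bytes out of the payload.
  static ByteBuffer toPayload(DataOutputBuffer dob) {
    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  }
}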
Setting the class to TezGroupedSplitsInputFormat + // here would cause pre-mature grouping which would be incorrect. + Class inputFormatClass = HiveInputFormat.class; + conf.setClass("mapred.input.format.class", HiveInputFormat.class, InputFormat.class); + // mapreduce.tez.input.initializer.serialize.event.payload should be set + // to false when using this plug-in to avoid getting a serialized event at run-time. + conf.setBoolean("mapreduce.tez.input.initializer.serialize.event.payload", false); + for (int i = 0; i < mapWorkList.size(); i++) { + + mapWork = (MapWork) (mapWorkList.get(i)); + conf.set(TEZ_MERGE_CURRENT_MERGE_FILE_PREFIX, mapWork.getName()); + conf.set(Utilities.INPUT_NAME, mapWork.getName()); + LOG.info("Going through each work and adding MultiMRInput"); + mergeVx.addDataSource(mapWork.getName(), + MultiMRInput.createConfigBuilder(conf, HiveInputFormat.class).build()); + } + + VertexManagerPluginDescriptor desc = + VertexManagerPluginDescriptor.create(CustomPartitionVertex.class.getName()); + CustomVertexConfiguration vertexConf = + new CustomVertexConfiguration(mergeJoinWork.getMergeJoinOperator().getConf() + .getNumBuckets(), vertexType, mergeJoinWork.getBigTableAlias()); + DataOutputBuffer dob = new DataOutputBuffer(); + vertexConf.write(dob); + byte[] userPayload = dob.getData(); + desc.setUserPayload(UserPayload.create(ByteBuffer.wrap(userPayload))); + mergeVx.setVertexManagerPlugin(desc); + return mergeVx; + } else { + Vertex mergeVx = + createVertex(conf, (ReduceWork) mergeJoinWork.getMainWork(), appJarLr, additionalLr, fs, + mrScratchDir, ctx); + return mergeVx; + } + } + /* * Helper function to create Vertex from MapWork. */ private Vertex createVertex(JobConf conf, MapWork mapWork, LocalResource appJarLr, List additionalLr, FileSystem fs, - Path mrScratchDir, Context ctx, TezWork tezWork) throws Exception { + Path mrScratchDir, Context ctx, VertexType vertexType) + throws Exception { Path tezDir = getTezDir(mrScratchDir); @@ -470,15 +541,8 @@ private Vertex createVertex(JobConf conf, MapWork mapWork, Class inputFormatClass = conf.getClass("mapred.input.format.class", InputFormat.class); - boolean vertexHasCustomInput = false; - if (tezWork != null) { - for (BaseWork baseWork : tezWork.getParents(mapWork)) { - if (tezWork.getEdgeType(baseWork, mapWork) == EdgeType.CUSTOM_EDGE) { - vertexHasCustomInput = true; - } - } - } - + boolean vertexHasCustomInput = VertexType.isCustomInputType(vertexType); + LOG.info("Vertex has custom input? " + vertexHasCustomInput); if (vertexHasCustomInput) { groupSplitsInInputInitializer = false; // grouping happens in execution phase. 
The input payload should not enable grouping here, @@ -513,6 +577,8 @@ private Vertex createVertex(JobConf conf, MapWork mapWork, } } + // remember mapping of plan to input + conf.set(Utilities.INPUT_NAME, mapWork.getName()); if (HiveConf.getBoolVar(conf, ConfVars.HIVE_AM_SPLIT_GENERATION) && !mapWork.isUseOneNullRowInputFormat()) { @@ -593,6 +659,7 @@ private Vertex createVertex(JobConf conf, ReduceWork reduceWork, Path mrScratchDir, Context ctx) throws Exception { // set up operator plan + conf.set(Utilities.INPUT_NAME, reduceWork.getName()); Utilities.setReduceWork(conf, reduceWork, mrScratchDir, false); // create the directories FileSinkOperators need @@ -937,12 +1004,22 @@ public JobConf initializeVertexConf(JobConf conf, Context context, BaseWork work return initializeVertexConf(conf, context, (MapWork)work); } else if (work instanceof ReduceWork) { return initializeVertexConf(conf, context, (ReduceWork)work); + } else if (work instanceof MergeJoinWork) { + return initializeVertexConf(conf, context, (MergeJoinWork) work); } else { assert false; return null; } } + private JobConf initializeVertexConf(JobConf conf, Context context, MergeJoinWork work) { + if (work.getMainWork() instanceof MapWork) { + return initializeVertexConf(conf, context, (MapWork) (work.getMainWork())); + } else { + return initializeVertexConf(conf, context, (ReduceWork) (work.getMainWork())); + } + } + /** * Create a vertex from a given work object. * @@ -958,18 +1035,21 @@ public JobConf initializeVertexConf(JobConf conf, Context context, BaseWork work */ public Vertex createVertex(JobConf conf, BaseWork work, Path scratchDir, LocalResource appJarLr, - List additionalLr, - FileSystem fileSystem, Context ctx, boolean hasChildren, TezWork tezWork) throws Exception { + List additionalLr, FileSystem fileSystem, Context ctx, boolean hasChildren, + TezWork tezWork, VertexType vertexType) throws Exception { Vertex v = null; // simply dispatch the call to the right method for the actual (sub-) type of // BaseWork. 
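createVertex now dispatches on one more BaseWork subtype, and, as the initializeVertexConf change further down shows, a MergeJoinWork is configured through whichever work acts as its main work. A toy model of that dispatch shape (every class name below is a stand-in, not one of Hive's plan classes):

abstract class Work {}
class MapW extends Work {}
class ReduceW extends Work {}
class MergeJoinW extends Work {
  final Work mainWork;
  MergeJoinW(Work mainWork) { this.mainWork = mainWork; }
}

public class Dispatcher {
  static String configure(Work w) {
    if (w instanceof MapW) {
      return "map vertex";
    } else if (w instanceof ReduceW) {
      return "reduce vertex";
    } else if (w instanceof MergeJoinW) {
      // A merge-join vertex is configured through its main work.
      return configure(((MergeJoinW) w).mainWork);
    }
    throw new IllegalArgumentException("unknown work type: " + w.getClass());
  }

  public static void main(String[] args) {
    System.out.println(configure(new MergeJoinW(new MapW()))); // prints "map vertex"
  }
}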
if (work instanceof MapWork) { - v = createVertex(conf, (MapWork) work, appJarLr, - additionalLr, fileSystem, scratchDir, ctx, tezWork); + v = createVertex(conf, (MapWork) work, appJarLr, additionalLr, fileSystem, scratchDir, ctx, + vertexType); } else if (work instanceof ReduceWork) { v = createVertex(conf, (ReduceWork) work, appJarLr, additionalLr, fileSystem, scratchDir, ctx); + } else if (work instanceof MergeJoinWork) { + v = createVertex(conf, (MergeJoinWork) work, appJarLr, additionalLr, fileSystem, scratchDir, + ctx, vertexType); } else { // something is seriously wrong if this is happening throw new HiveException(ErrorMsg.GENERIC_ERROR.getErrorCodedMsg()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java index 78d6cf5..7ba2ae1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DynamicPartitionPruner.java @@ -31,6 +31,7 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentSkipListSet; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; @@ -59,6 +60,7 @@ import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.tez.dag.api.event.VertexState; import org.apache.tez.runtime.api.InputInitializerContext; import org.apache.tez.runtime.api.events.InputInitializerEvent; @@ -77,12 +79,13 @@ private final BytesWritable writable = new BytesWritable(); - private final BlockingQueue queue = - new LinkedBlockingQueue(); + private final BlockingQueue queue = new LinkedBlockingQueue(); + + private final Set sourcesWaitingForEvents = new HashSet(); private int sourceInfoCount = 0; - private InputInitializerContext context; + private final Object endOfEvents = new Object(); public DynamicPartitionPruner() { } @@ -91,8 +94,21 @@ public void prune(MapWork work, JobConf jobConf, InputInitializerContext context throws SerDeException, IOException, InterruptedException, HiveException { - this.context = context; - this.initialize(work, jobConf); + synchronized(sourcesWaitingForEvents) { + initialize(work, jobConf); + + if (sourcesWaitingForEvents.isEmpty()) { + return; + } + + Set states = Collections.singleton(VertexState.SUCCEEDED); + for (String source : sourcesWaitingForEvents) { + // we need to get state transition updates for the vertices that will send + // events to us. once we have received all events and a vertex has succeeded, + // we can move to do the pruning. + context.registerForVertexStateUpdates(source, states); + } + } LOG.info("Waiting for events (" + sourceInfoCount + " items) ..."); // synchronous event processing loop. 
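The pruner no longer predicts how many events to expect; instead the queue is terminated by a private sentinel object (endOfEvents) once every registered source vertex has succeeded. A minimal sketch of that producer/consumer pattern, independent of the Tez event types:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class SentinelQueue {
  // A private object works as an unambiguous end marker because no
  // producer can ever enqueue the same identity.
  private static final Object END = new Object();
  private final BlockingQueue<Object> queue = new LinkedBlockingQueue<>();

  void publish(String event) { queue.offer(event); }
  void finish() { queue.offer(END); }

  int drain() throws InterruptedException {
    int count = 0;
    while (true) {
      Object element = queue.take(); // blocks; no polling or task-count math
      if (element == END) {
        return count;                // identity comparison, not equals()
      }
      count++;
    }
  }

  public static void main(String[] args) throws InterruptedException {
    SentinelQueue q = new SentinelQueue();
    q.publish("event-1");
    q.publish("event-2");
    q.finish();
    System.out.println(q.drain() + " events"); // 2 events
  }
}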
Won't return until all events have @@ -102,7 +118,7 @@ public void prune(MapWork work, JobConf jobConf, InputInitializerContext context LOG.info("Ok to proceed."); } - public BlockingQueue getQueue() { + public BlockingQueue getQueue() { return queue; } @@ -111,11 +127,14 @@ private void clear() { sourceInfoCount = 0; } - private void initialize(MapWork work, JobConf jobConf) throws SerDeException { + public void initialize(MapWork work, JobConf jobConf) throws SerDeException { this.clear(); Map columnMap = new HashMap(); + Set sources = work.getEventSourceTableDescMap().keySet(); + + sourcesWaitingForEvents.addAll(sources); - for (String s : work.getEventSourceTableDescMap().keySet()) { + for (String s : sources) { List tables = work.getEventSourceTableDescMap().get(s); List columnNames = work.getEventSourceColumnNameMap().get(s); List partKeyExprs = work.getEventSourcePartKeyExprMap().get(s); @@ -277,46 +296,30 @@ public SourceInfo(TableDesc table, ExprNodeDesc partKey, String columnName, JobC private void processEvents() throws SerDeException, IOException, InterruptedException { int eventCount = 0; - int neededEvents = getExpectedNumberOfEvents(); - while (neededEvents > eventCount) { - InputInitializerEvent event = queue.take(); + while (true) { + Object element = queue.take(); + + if (element == endOfEvents) { + // we're done processing events + break; + } + + InputInitializerEvent event = (InputInitializerEvent) element; + LOG.info("Input event: " + event.getTargetInputName() + ", " + event.getTargetVertexName() + ", " + (event.getUserPayload().limit() - event.getUserPayload().position())); - processPayload(event.getUserPayload()); + processPayload(event.getUserPayload(), event.getSourceVertexName()); eventCount += 1; - neededEvents = getExpectedNumberOfEvents(); - LOG.info("Needed events: " + neededEvents + ", received events: " + eventCount); } - } - - private int getExpectedNumberOfEvents() throws InterruptedException { - int neededEvents = 0; - - boolean notInitialized; - do { - neededEvents = 0; - notInitialized = false; - for (String s : sourceInfoMap.keySet()) { - int multiplier = sourceInfoMap.get(s).size(); - int taskNum = context.getVertexNumTasks(s); - LOG.info("Vertex " + s + " has " + taskNum + " events."); - if (taskNum < 0) { - notInitialized = true; - Thread.sleep(10); - continue; - } - neededEvents += (taskNum * multiplier); - } - } while (notInitialized); - - return neededEvents; + LOG.info("Received events: " + eventCount); } @SuppressWarnings("deprecation") - private String processPayload(ByteBuffer payload) throws SerDeException, IOException { + private String processPayload(ByteBuffer payload, String sourceName) throws SerDeException, + IOException { + DataInputStream in = new DataInputStream(new ByteBufferBackedInputStream(payload)); - String sourceName = in.readUTF(); String columnName = in.readUTF(); boolean skip = in.readBoolean(); @@ -390,4 +393,26 @@ public int read(byte[] bytes, int off, int len) throws IOException { } } + public void addEvent(InputInitializerEvent event) { + synchronized(sourcesWaitingForEvents) { + if (sourcesWaitingForEvents.contains(event.getSourceVertexName())) { + queue.offer(event); + } + } + } + + public void processVertex(String name) { + LOG.info("Vertex succeeded: " + name); + + synchronized(sourcesWaitingForEvents) { + sourcesWaitingForEvents.remove(name); + + if (sourcesWaitingForEvents.isEmpty()) { + // we've got what we need; mark the queue + queue.offer(endOfEvents); + } else { + LOG.info("Waiting for " + 
sourcesWaitingForEvents.size() + " events."); + } + } + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java index 6e1379e..c45479f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java @@ -38,8 +38,9 @@ import org.apache.hadoop.mapreduce.split.TezMapReduceSplitsGrouper; import org.apache.hadoop.util.ReflectionUtils; import org.apache.tez.common.TezUtils; -import org.apache.tez.dag.api.VertexLocationHint; import org.apache.tez.dag.api.TaskLocationHint; +import org.apache.tez.dag.api.VertexLocationHint; +import org.apache.tez.dag.api.event.VertexStateUpdate; import org.apache.tez.mapreduce.hadoop.InputSplitInfoMem; import org.apache.tez.mapreduce.hadoop.MRInputHelpers; import org.apache.tez.mapreduce.protos.MRRuntimeProtos.MRInputUserPayloadProto; @@ -152,8 +153,21 @@ public HiveSplitGenerator(InputInitializerContext initializerContext) { public static Multimap generateGroupedSplits(JobConf jobConf, Configuration conf, InputSplit[] splits, float waves, int availableSlots) throws Exception { + return generateGroupedSplits(jobConf, conf, splits, waves, availableSlots, null); + } - MapWork work = Utilities.getMapWork(jobConf); + public static Multimap generateGroupedSplits(JobConf jobConf, + Configuration conf, InputSplit[] splits, float waves, int availableSlots, + String inputName) throws Exception { + + MapWork work = null; + if (inputName != null) { + work = (MapWork) Utilities.getMergeWork(jobConf, inputName); + // work can still be null if there is no merge work for this input + } + if (work == null) { + work = Utilities.getMapWork(jobConf); + } Multimap bucketSplitMultiMap = ArrayListMultimap. 
create(); @@ -230,9 +244,14 @@ public HiveSplitGenerator(InputInitializerContext initializerContext) { } @Override + public void onVertexStateUpdated(VertexStateUpdate stateUpdate) { + pruner.processVertex(stateUpdate.getVertexName()); + } + + @Override public void handleInputInitializerEvent(List events) throws Exception { for (InputInitializerEvent e : events) { - pruner.getQueue().put(e); + pruner.addEvent(e); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java index 37b7bbd..c77e081 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java @@ -17,14 +17,20 @@ */ package org.apache.hadoop.hive.ql.exec.tez; -import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.TreeMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.ql.exec.DummyStoreOperator; import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator; import org.apache.hadoop.hive.ql.exec.MapOperator; import org.apache.hadoop.hive.ql.exec.MapredContext; @@ -36,15 +42,17 @@ import org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.exec.tez.TezProcessor.TezKVOutputCollector; +import org.apache.hadoop.hive.ql.exec.tez.tools.KeyValueInputMerger; import org.apache.hadoop.hive.ql.exec.vector.VectorMapOperator; +import org.apache.hadoop.hive.ql.io.IOContext; import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; -import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.util.StringUtils; import org.apache.tez.mapreduce.input.MRInputLegacy; +import org.apache.tez.mapreduce.input.MultiMRInput; import org.apache.tez.mapreduce.processor.MRTaskReporter; +import org.apache.tez.runtime.api.Input; import org.apache.tez.runtime.api.LogicalInput; import org.apache.tez.runtime.api.LogicalOutput; import org.apache.tez.runtime.api.ProcessorContext; @@ -58,27 +66,61 @@ private MapOperator mapOp; + private final List mergeMapOpList = new ArrayList(); public static final Log l4j = LogFactory.getLog(MapRecordProcessor.class); - private final ExecMapperContext execContext = new ExecMapperContext(); + private MapRecordSource[] sources; + private final Map multiMRInputMap = new HashMap(); + private int position = 0; + private boolean foundCachedMergeWork = false; + MRInputLegacy legacyMRInput = null; + private ExecMapperContext execContext = null; private boolean abort = false; protected static final String MAP_PLAN_KEY = "__MAP_PLAN__"; private MapWork mapWork; + List mergeWorkList = null; + private static Map connectOps = + new TreeMap(); - public MapRecordProcessor(JobConf jconf) { + public MapRecordProcessor(JobConf jconf) throws Exception { ObjectCache cache = ObjectCacheFactory.getCache(jconf); + execContext = new ExecMapperContext(jconf); execContext.setJc(jconf); // create map and fetch operators mapWork = (MapWork) cache.retrieve(MAP_PLAN_KEY); if (mapWork == null) { mapWork = 
Utilities.getMapWork(jconf); cache.cache(MAP_PLAN_KEY, mapWork); - l4j.info("Plan: "+mapWork); + l4j.debug("Plan: " + mapWork); for (String s: mapWork.getAliases()) { - l4j.info("Alias: "+s); + l4j.debug("Alias: " + s); } } else { Utilities.setMapWork(jconf, mapWork); } + + String prefixes = jconf.get(DagUtils.TEZ_MERGE_WORK_FILE_PREFIXES); + if (prefixes != null) { + mergeWorkList = new ArrayList(); + for (String prefix : prefixes.split(",")) { + MapWork mergeMapWork = (MapWork) cache.retrieve(prefix); + if (mergeMapWork != null) { + l4j.info("Found merge work in cache"); + foundCachedMergeWork = true; + mergeWorkList.add(mergeMapWork); + continue; + } + if (foundCachedMergeWork) { + throw new Exception( + "Should find all work in cache else operator pipeline will be in non-deterministic state"); + } + + if ((prefix != null) && (prefix.isEmpty() == false)) { + mergeMapWork = (MapWork) Utilities.getMergeWork(jconf, prefix); + mergeWorkList.add(mergeMapWork); + cache.cache(prefix, mergeMapWork); + } + } + } } @Override @@ -88,8 +130,8 @@ void init(JobConf jconf, ProcessorContext processorContext, MRTaskReporter mrRep super.init(jconf, processorContext, mrReporter, inputs, outputs); //Update JobConf using MRInput, info like filename comes via this - MRInputLegacy mrInput = TezProcessor.getMRInput(inputs); - Configuration updatedConf = mrInput.getConfigUpdates(); + legacyMRInput = getMRInput(inputs); + Configuration updatedConf = legacyMRInput.getConfigUpdates(); if (updatedConf != null) { for (Entry entry : updatedConf) { jconf.set(entry.getKey(), entry.getValue()); @@ -99,20 +141,52 @@ void init(JobConf jconf, ProcessorContext processorContext, MRTaskReporter mrRep createOutputMap(); // Start all the Outputs. for (Entry outputEntry : outputs.entrySet()) { - l4j.info("Starting Output: " + outputEntry.getKey()); + l4j.debug("Starting Output: " + outputEntry.getKey()); outputEntry.getValue().start(); ((TezKVOutputCollector) outMap.get(outputEntry.getKey())).initialize(); } try { + if (mapWork.getVectorMode()) { mapOp = new VectorMapOperator(); } else { mapOp = new MapOperator(); } + connectOps.clear(); + if (mergeWorkList != null) { + MapOperator mergeMapOp = null; + for (MapWork mergeMapWork : mergeWorkList) { + processorContext.waitForAnyInputReady(Collections.singletonList((Input) (inputs + .get(mergeMapWork.getName())))); + if (mergeMapWork.getVectorMode()) { + mergeMapOp = new VectorMapOperator(); + } else { + mergeMapOp = new MapOperator(); + } + + mergeMapOpList.add(mergeMapOp); + // initialize the merge operators first. 
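The constructor above enforces an all-or-nothing rule on the object cache: either every merge MapWork named by the comma-separated prefixes is found cached, or none may be, because a partial hit would assemble a different operator pipeline than the cached one. A stripped-down model of that loop, with plain strings standing in for MapWork and the cache:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MergeWorkLookup {
  static List<String> resolve(String prefixes, Map<String, String> cache) throws Exception {
    List<String> works = new ArrayList<>();
    boolean foundCached = false;
    for (String prefix : prefixes.split(",")) {
      String work = cache.get(prefix);
      if (work != null) {
        foundCached = true;
        works.add(work);
        continue;
      }
      if (foundCached) {
        // Mirrors the patch's check: a mix of cached and freshly loaded
        // works would leave the pipeline in a non-deterministic state.
        throw new Exception("should find all work in cache, or none of it");
      }
      work = "loaded:" + prefix; // stands in for Utilities.getMergeWork(conf, prefix)
      works.add(work);
      cache.put(prefix, work);
    }
    return works;
  }

  public static void main(String[] args) throws Exception {
    Map<String, String> cache = new HashMap<>();
    System.out.println(resolve("w1,w2", cache)); // loads both and caches them
    System.out.println(resolve("w1,w2", cache)); // now both come from the cache
  }
}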
+ if (mergeMapOp != null) { + mergeMapOp.setConf(mergeMapWork); + l4j.info("Input name is " + mergeMapWork.getName()); + jconf.set(Utilities.INPUT_NAME, mergeMapWork.getName()); + mergeMapOp.setChildren(jconf); + if (foundCachedMergeWork == false) { + DummyStoreOperator dummyOp = getJoinParentOp(mergeMapOp); + connectOps.put(mergeMapWork.getTag(), dummyOp); + } + mergeMapOp.setExecContext(new ExecMapperContext(jconf)); + mergeMapOp.initializeLocalWork(jconf); + } + } + } + // initialize map operator mapOp.setConf(mapWork); + l4j.info("Main input name is " + mapWork.getName()); + jconf.set(Utilities.INPUT_NAME, mapWork.getName()); mapOp.setChildren(jconf); l4j.info(mapOp.dump(0)); @@ -121,12 +195,21 @@ void init(JobConf jconf, ProcessorContext processorContext, MRTaskReporter mrRep ((TezContext) MapredContext.get()).setTezProcessorContext(processorContext); mapOp.setExecContext(execContext); mapOp.initializeLocalWork(jconf); + + initializeMapRecordSources(); mapOp.initialize(jconf, null); + if ((mergeMapOpList != null) && mergeMapOpList.isEmpty() == false) { + for (MapOperator mergeMapOp : mergeMapOpList) { + jconf.set(Utilities.INPUT_NAME, mergeMapOp.getConf().getName()); + mergeMapOp.initialize(jconf, null); + } + } // Initialization isn't finished until all parents of all operators // are initialized. For broadcast joins that means initializing the // dummy parent operators as well. List dummyOps = mapWork.getDummyOps(); + jconf.set(Utilities.INPUT_NAME, mapWork.getName()); if (dummyOps != null) { for (Operator dummyOp : dummyOps){ dummyOp.setExecContext(execContext); @@ -151,54 +234,46 @@ void init(JobConf jconf, ProcessorContext processorContext, MRTaskReporter mrRep perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS); } - @Override - void run() throws IOException{ - - MRInputLegacy in = TezProcessor.getMRInput(inputs); - KeyValueReader reader = in.getReader(); + private void initializeMapRecordSources() throws Exception { + int size = mergeMapOpList.size() + 1; // the +1 is for the main map operator itself + sources = new MapRecordSource[size]; + KeyValueReader reader = legacyMRInput.getReader(); + position = mapOp.getConf().getTag(); + sources[position] = new MapRecordSource(); + sources[position].init(jconf, mapOp, reader); + for (MapOperator mapOp : mergeMapOpList) { + int tag = mapOp.getConf().getTag(); + sources[tag] = new MapRecordSource(); + String inputName = mapOp.getConf().getName(); + MultiMRInput multiMRInput = multiMRInputMap.get(inputName); + Collection kvReaders = multiMRInput.getKeyValueReaders(); + l4j.debug("There are " + kvReaders.size() + " key-value readers for input " + inputName); + List kvReaderList = new ArrayList(kvReaders); + reader = new KeyValueInputMerger(kvReaderList); + sources[tag].init(jconf, mapOp, reader); + } + ((TezContext) MapredContext.get()).setRecordSources(sources); + } - //process records until done - while(reader.next()){ - //ignore the key for maps - reader.getCurrentKey(); - Object value = reader.getCurrentValue(); - boolean needMore = processRow(value); - if(!needMore){ - break; + private DummyStoreOperator getJoinParentOp(Operator mergeMapOp) { + for (Operator childOp : mergeMapOp.getChildOperators()) { + if ((childOp.getChildOperators() == null) || (childOp.getChildOperators().isEmpty())) { + return (DummyStoreOperator) childOp; + } else { + return getJoinParentOp(childOp); } } + return null; } + @Override + void run() throws Exception { - /** - * @param value value to process - * @return true if it is not done and can 
take more inputs - */ - private boolean processRow(Object value) { - // reset the execContext for each new row - execContext.resetRow(); - - try { - if (mapOp.getDone()) { - return false; //done - } else { - // Since there is no concept of a group, we don't invoke - // startGroup/endGroup for a mapper - mapOp.process((Writable)value); - if (isLogInfoEnabled) { - logProgress(); - } - } - } catch (Throwable e) { - abort = true; - if (e instanceof OutOfMemoryError) { - // Don't create a new object if we are already out of memory - throw (OutOfMemoryError) e; - } else { - l4j.fatal(StringUtils.stringifyException(e)); - throw new RuntimeException(e); + while (sources[position].pushRecord()) { + if (isLogInfoEnabled) { + logProgress(); } } - return true; //give me more } @Override @@ -214,6 +289,11 @@ void close(){ return; } mapOp.close(abort); + if (mergeMapOpList.isEmpty() == false) { + for (MapOperator mergeMapOp : mergeMapOpList) { + mergeMapOp.close(abort); + } + } // Need to close the dummyOps as well. The operator pipeline // is not considered "closed/done" unless all operators are @@ -242,4 +322,27 @@ void close(){ MapredContext.close(); } } + + public static Map getConnectOps() { + return connectOps; + } + + private MRInputLegacy getMRInput(Map inputs) throws Exception { + // there should be only one MRInput + MRInputLegacy theMRInput = null; + l4j.info("The input names are: " + Arrays.toString(inputs.keySet().toArray())); + for (Entry inp : inputs.entrySet()) { + if (inp.getValue() instanceof MRInputLegacy) { + if (theMRInput != null) { + throw new IllegalArgumentException("Only one MRInput is expected"); + } + // a better logic would be to find the alias + theMRInput = (MRInputLegacy) inp.getValue(); + } else if (inp.getValue() instanceof MultiMRInput) { + multiMRInputMap.put(inp.getKey(), (MultiMRInput) inp.getValue()); + } + } + theMRInput.init(); + return theMRInput; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordSource.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordSource.java new file mode 100644 index 0000000..0419568 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordSource.java @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec.tez; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.MapOperator; +import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.util.StringUtils; +import org.apache.tez.mapreduce.input.MRInput; +import org.apache.tez.runtime.library.api.KeyValueReader; + +/** + * Process input from tez LogicalInput and write output - for a map plan Just pump the records + * through the query plan. + */ + +public class MapRecordSource implements RecordSource { + + public static final Log LOG = LogFactory.getLog(MapRecordSource.class); + private ExecMapperContext execContext = null; + private MapOperator mapOp = null; + private KeyValueReader reader = null; + private final boolean grouped = false; + + void init(JobConf jconf, MapOperator mapOp, KeyValueReader reader) throws IOException { + execContext = new ExecMapperContext(jconf); + this.mapOp = mapOp; + this.reader = reader; + } + + @Override + public final boolean isGrouped() { + return grouped; + } + + @Override + public boolean pushRecord() throws HiveException { + execContext.resetRow(); + + try { + if (reader.next()) { + Object value; + try { + value = reader.getCurrentValue(); + } catch (IOException e) { + throw new HiveException(e); + } + return processRow(value); + } + } catch (IOException e) { + throw new HiveException(e); + } + return false; + } + + private boolean processRow(Object value) { + try { + if (mapOp.getDone()) { + return false; // done + } else { + // Since there is no concept of a group, we don't invoke + // startGroup/endGroup for a mapper + mapOp.process((Writable) value); + } + } catch (Throwable e) { + if (e instanceof OutOfMemoryError) { + // Don't create a new object if we are already out of memory + throw (OutOfMemoryError) e; + } else { + LOG.fatal(StringUtils.stringifyException(e)); + throw new RuntimeException(e); + } + } + return true; // give me more + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java index 3425bf6..d2d1962 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java @@ -20,6 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.MapredContext; import org.apache.hadoop.hive.ql.exec.ObjectCacheFactory; import org.apache.hadoop.hive.ql.exec.Operator; @@ -40,7 +41,9 @@ import org.apache.tez.runtime.api.ProcessorContext; import org.apache.tez.runtime.library.api.KeyValueReader; +import java.io.IOException; import java.util.Map; +import java.util.Map.Entry; /** * Record processor for fast merging of files. 
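MapRecordSource inverts the old run() loop into the push contract defined by the RecordSource interface later in this patch: each pushRecord() call pulls at most one record from the reader and feeds it to the operator, returning false once input is drained or the operator is done. A reduced, Tez-free model of that contract:

import java.util.Iterator;
import java.util.List;
import java.util.function.Predicate;

public class PushAdapter<T> {
  private final Iterator<T> reader;    // stands in for the KeyValueReader
  private final Predicate<T> consumer; // stands in for mapOp.process(); false means "done"

  public PushAdapter(Iterator<T> reader, Predicate<T> consumer) {
    this.reader = reader;
    this.consumer = consumer;
  }

  // Mirrors pushRecord(): at most one record per call, false when the
  // input is exhausted or when the consumer signals it is done.
  public boolean pushRecord() {
    if (!reader.hasNext()) {
      return false;
    }
    return consumer.test(reader.next());
  }

  public static void main(String[] args) {
    PushAdapter<String> source = new PushAdapter<>(
        List.of("r1", "r2", "r3").iterator(),
        row -> { System.out.println(row); return true; });
    while (source.pushRecord()) { } // drains exactly like the new run() loop
  }
}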
@@ -51,11 +54,12 @@ .getLog(MergeFileRecordProcessor.class); protected Operator mergeOp; - private final ExecMapperContext execContext = new ExecMapperContext(); + private ExecMapperContext execContext = null; protected static final String MAP_PLAN_KEY = "__MAP_PLAN__"; private MergeFileWork mfWork; + MRInputLegacy mrInput = null; private boolean abort = false; - private Object[] row = new Object[2]; + private final Object[] row = new Object[2]; @Override void init(JobConf jconf, ProcessorContext processorContext, @@ -63,16 +67,16 @@ void init(JobConf jconf, ProcessorContext processorContext, Map outputs) throws Exception { perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS); super.init(jconf, processorContext, mrReporter, inputs, outputs); + execContext = new ExecMapperContext(jconf); //Update JobConf using MRInput, info like filename comes via this - MRInputLegacy mrInput = TezProcessor.getMRInput(inputs); + mrInput = getMRInput(inputs); Configuration updatedConf = mrInput.getConfigUpdates(); if (updatedConf != null) { for (Map.Entry entry : updatedConf) { jconf.set(entry.getKey(), entry.getValue()); } } - createOutputMap(); // Start all the Outputs. for (Map.Entry outputEntry : outputs.entrySet()) { @@ -127,8 +131,7 @@ void init(JobConf jconf, ProcessorContext processorContext, @Override void run() throws Exception { - MRInputLegacy in = TezProcessor.getMRInput(inputs); - KeyValueReader reader = in.getReader(); + KeyValueReader reader = mrInput.getReader(); //process records until done while (reader.next()) { @@ -205,4 +208,23 @@ private boolean processRow(Object key, Object value) { return true; //give me more } + private MRInputLegacy getMRInput(Map inputs) throws Exception { + // there should be only one MRInput + MRInputLegacy theMRInput = null; + for (Entry inp : inputs.entrySet()) { + if (inp.getValue() instanceof MRInputLegacy) { + if (theMRInput != null) { + throw new IllegalArgumentException("Only one MRInput is expected"); + } + // a better logic would be to find the alias + theMRInput = (MRInputLegacy) inp.getValue(); + } else { + throw new IOException("Expecting only one input of type MRInputLegacy. 
Found type: " + + inp.getClass().getCanonicalName()); + } + } + theMRInput.init(); + + return theMRInput; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileTezProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileTezProcessor.java index 7fff28e..e341d40 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileTezProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileTezProcessor.java @@ -39,12 +39,6 @@ public MergeFileTezProcessor(ProcessorContext context) { public void run(Map inputs, Map outputs) throws Exception { rproc = new MergeFileRecordProcessor(); - MRInputLegacy mrInput = getMRInput(inputs); - try { - mrInput.init(); - } catch (IOException e) { - throw new RuntimeException("Failed while initializing MRInput", e); - } initializeAndRunProcessor(inputs, outputs); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java index a00d162..372c54d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordProcessor.java @@ -115,8 +115,7 @@ void init(JobConf jconf, ProcessorContext processorContext, MRTaskReporter mrRep */ protected void logCloseInfo() { long used_memory = memoryMXBean.getHeapMemoryUsage().getUsed(); - l4j.info("ExecMapper: processed " + numRows + " rows: used memory = " - + used_memory); + l4j.info("TezProcessor: processed " + numRows + " rows/groups: used memory = " + used_memory); } /** @@ -126,8 +125,7 @@ protected void logProgress() { numRows++; if (numRows == nextUpdateCntr) { long used_memory = memoryMXBean.getHeapMemoryUsage().getUsed(); - l4j.info("ExecMapper: processing " + numRows - + " rows: used memory = " + used_memory); + l4j.info("TezProcessor: processing " + numRows + " rows/groups: used memory = " + used_memory); nextUpdateCntr = getNextUpdateRecordCounter(numRows); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordSource.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordSource.java new file mode 100644 index 0000000..2cfa8f1 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/RecordSource.java @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.exec.tez; + +import org.apache.hadoop.hive.ql.metadata.HiveException; + +public interface RecordSource { + public boolean pushRecord() throws HiveException; + public boolean isGrouped(); +} \ No newline at end of file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java index 990a4f1..941f97c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java @@ -17,9 +17,7 @@ */ package org.apache.hadoop.hive.ql.exec.tez; -import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -35,31 +33,13 @@ import org.apache.hadoop.hive.ql.exec.OperatorUtils; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.mr.ExecMapper.ReportStats; -import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.exec.tez.TezProcessor.TezKVOutputCollector; -import org.apache.hadoop.hive.ql.exec.tez.tools.InputMerger; -import org.apache.hadoop.hive.ql.exec.vector.VectorizedBatchUtil; -import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; -import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; -import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory; import org.apache.hadoop.hive.ql.log.PerfLogger; -import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.ReduceWork; import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.serde2.Deserializer; -import org.apache.hadoop.hive.serde2.SerDe; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.serde2.SerDeUtils; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; -import org.apache.hadoop.hive.serde2.objectinspector.StructField; -import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.util.StringUtils; import org.apache.tez.mapreduce.processor.MRTaskReporter; import org.apache.tez.runtime.api.Input; import org.apache.tez.runtime.api.LogicalInput; @@ -76,39 +56,16 @@ private static final String REDUCE_PLAN_KEY = "__REDUCE_PLAN__"; public static final Log l4j = LogFactory.getLog(ReduceRecordProcessor.class); - private final ExecMapperContext execContext = new ExecMapperContext(); - private boolean abort = false; - private Deserializer inputKeyDeserializer; - // Input value serde needs to be an array to support different SerDe - // for different tags - private final SerDe[] inputValueDeserializer = new SerDe[Byte.MAX_VALUE]; - - TableDesc keyTableDesc; - TableDesc[] valueTableDesc; + private ReduceWork redWork; - ObjectInspector[] rowObjectInspector; private Operator reducer; - private boolean isTagged = false; - - private Object keyObject = null; - private BytesWritable groupKey; - private ReduceWork redWork; - - private boolean vectorized = false; + private ReduceRecordSource[] sources; - List row = new 
ArrayList(Utilities.reduceFieldNameList.size()); + private final byte position = 0; - private DataOutputBuffer buffer; - private VectorizedRowBatch[] batches; - // number of columns pertaining to keys in a vectorized row batch - private int keysColumnOffset; - private final int BATCH_SIZE = VectorizedRowBatch.DEFAULT_SIZE; - private StructObjectInspector keyStructInspector; - private StructObjectInspector[] valueStructInspectors; - /* this is only used in the error code path */ - private List[] valueStringWriters; + private boolean abort; @Override void init(JobConf jconf, ProcessorContext processorContext, MRTaskReporter mrReporter, @@ -118,10 +75,6 @@ void init(JobConf jconf, ProcessorContext processorContext, MRTaskReporter mrRep ObjectCache cache = ObjectCacheFactory.getCache(jconf); - rowObjectInspector = new ObjectInspector[Byte.MAX_VALUE]; - ObjectInspector[] valueObjectInspector = new ObjectInspector[Byte.MAX_VALUE]; - ObjectInspector keyObjectInspector; - redWork = (ReduceWork) cache.retrieve(REDUCE_PLAN_KEY); if (redWork == null) { redWork = Utilities.getReduceWork(jconf); @@ -131,95 +84,36 @@ void init(JobConf jconf, ProcessorContext processorContext, MRTaskReporter mrRep } reducer = redWork.getReducer(); - reducer.setParentOperators(null); // clear out any parents as reducer is the - // root - isTagged = redWork.getNeedsTagging(); - vectorized = redWork.getVectorMode(); + reducer.getParentOperators().clear(); + reducer.setParentOperators(null); // clear out any parents as reducer is the root - try { - keyTableDesc = redWork.getKeyDesc(); - inputKeyDeserializer = ReflectionUtils.newInstance(keyTableDesc - .getDeserializerClass(), null); - SerDeUtils.initializeSerDe(inputKeyDeserializer, null, keyTableDesc.getProperties(), null); - keyObjectInspector = inputKeyDeserializer.getObjectInspector(); - reducer.setGroupKeyObjectInspector(keyObjectInspector); - valueTableDesc = new TableDesc[redWork.getTagToValueDesc().size()]; - - if(vectorized) { - final int maxTags = redWork.getTagToValueDesc().size(); - keyStructInspector = (StructObjectInspector)keyObjectInspector; - batches = new VectorizedRowBatch[maxTags]; - valueStructInspectors = new StructObjectInspector[maxTags]; - valueStringWriters = new List[maxTags]; - keysColumnOffset = keyStructInspector.getAllStructFieldRefs().size(); - buffer = new DataOutputBuffer(); - } + int numTags = redWork.getTagToValueDesc().size(); - for (int tag = 0; tag < redWork.getTagToValueDesc().size(); tag++) { - // We should initialize the SerDe with the TypeInfo when available. 
- valueTableDesc[tag] = redWork.getTagToValueDesc().get(tag); - inputValueDeserializer[tag] = (SerDe) ReflectionUtils.newInstance( - valueTableDesc[tag].getDeserializerClass(), null); - SerDeUtils.initializeSerDe(inputValueDeserializer[tag], null, - valueTableDesc[tag].getProperties(), null); - valueObjectInspector[tag] = inputValueDeserializer[tag] - .getObjectInspector(); - - ArrayList ois = new ArrayList(); - - if(vectorized) { - /* vectorization only works with struct object inspectors */ - valueStructInspectors[tag] = (StructObjectInspector)valueObjectInspector[tag]; - - batches[tag] = VectorizedBatchUtil.constructVectorizedRowBatch(keyStructInspector, - valueStructInspectors[tag]); - final int totalColumns = keysColumnOffset + - valueStructInspectors[tag].getAllStructFieldRefs().size(); - valueStringWriters[tag] = new ArrayList(totalColumns); - valueStringWriters[tag].addAll(Arrays - .asList(VectorExpressionWriterFactory - .genVectorStructExpressionWritables(keyStructInspector))); - valueStringWriters[tag].addAll(Arrays - .asList(VectorExpressionWriterFactory - .genVectorStructExpressionWritables(valueStructInspectors[tag]))); - - /* - * The row object inspector used by ReduceWork needs to be a **standard** - * struct object inspector, not just any struct object inspector. - */ - ArrayList colNames = new ArrayList(); - List fields = keyStructInspector.getAllStructFieldRefs(); - for (StructField field: fields) { - colNames.add(Utilities.ReduceField.KEY.toString() + "." + field.getFieldName()); - ois.add(field.getFieldObjectInspector()); - } - fields = valueStructInspectors[tag].getAllStructFieldRefs(); - for (StructField field: fields) { - colNames.add(Utilities.ReduceField.VALUE.toString() + "." + field.getFieldName()); - ois.add(field.getFieldObjectInspector()); - } - rowObjectInspector[tag] = ObjectInspectorFactory - .getStandardStructObjectInspector(colNames, ois); - } else { - ois.add(keyObjectInspector); - ois.add(valueObjectInspector[tag]); - rowObjectInspector[tag] = ObjectInspectorFactory - .getStandardStructObjectInspector(Utilities.reduceFieldNameList, ois); - } + ObjectInspector[] ois = new ObjectInspector[numTags]; + sources = new ReduceRecordSource[numTags]; - } - } catch (Exception e) { - throw new RuntimeException(e); + for (int tag = 0; tag < redWork.getTagToValueDesc().size(); tag++) { + TableDesc keyTableDesc = redWork.getKeyDesc(); + TableDesc valueTableDesc = redWork.getTagToValueDesc().get(tag); + KeyValuesReader reader = + (KeyValuesReader) inputs.get(redWork.getTagToInput().get(tag)).getReader(); + + sources[tag] = new ReduceRecordSource(); + sources[tag].init(jconf, reducer, redWork.getVectorMode(), keyTableDesc, valueTableDesc, + reader, tag == position, (byte) tag, + redWork.getScratchColumnVectorTypes()); + ois[tag] = sources[tag].getObjectInspector(); } MapredContext.init(false, new JobConf(jconf)); ((TezContext) MapredContext.get()).setInputs(inputs); ((TezContext) MapredContext.get()).setTezProcessorContext(processorContext); + ((TezContext) MapredContext.get()).setRecordSources(sources); // initialize reduce operator tree try { l4j.info(reducer.dump(0)); - reducer.initialize(jconf, rowObjectInspector); + reducer.initialize(jconf, ois); // Initialization isn't finished until all parents of all operators // are initialized. 
For broadcast joins that means initializing the @@ -227,7 +121,6 @@ void init(JobConf jconf, ProcessorContext processorContext, MRTaskReporter mrRep List dummyOps = redWork.getDummyOps(); if (dummyOps != null) { for (Operator dummyOp : dummyOps){ - dummyOp.setExecContext(execContext); dummyOp.initialize(jconf, null); } } @@ -271,28 +164,12 @@ void run() throws Exception { ((TezKVOutputCollector) outMap.get(outputEntry.getKey())).initialize(); } - KeyValuesReader kvsReader; - try { - if(shuffleInputs.size() == 1){ - //no merging of inputs required - kvsReader = (KeyValuesReader) shuffleInputs.get(0).getReader(); - }else { - //get a sort merged input - kvsReader = new InputMerger(shuffleInputs); - } - } catch (Exception e) { - throw new IOException(e); - } - - while(kvsReader.next()){ - Object key = kvsReader.getCurrentKey(); - Iterable values = kvsReader.getCurrentValues(); - boolean needMore = processRows(key, values); - if(!needMore){ - break; + // run the operator pipeline + while (sources[position].pushRecord()) { + if (isLogInfoEnabled) { + logProgress(); } } - } /** @@ -302,209 +179,22 @@ void run() throws Exception { */ private List getShuffleInputs(Map inputs) { //the reduce plan inputs have tags, add all inputs that have tags - Map tag2input = redWork.getTagToInput(); + Map tagToInput = redWork.getTagToInput(); ArrayList shuffleInputs = new ArrayList(); - for(String inpStr : tag2input.values()){ + for(String inpStr : tagToInput.values()){ + if (inputs.get(inpStr) == null) { + throw new AssertionError("Could not find input: " + inpStr); + } shuffleInputs.add(inputs.get(inpStr)); } return shuffleInputs; } - /** - * @param key - * @param values - * @return true if it is not done and can take more inputs - */ - private boolean processRows(Object key, Iterable values) { - if(reducer.getDone()){ - //done - no more records needed - return false; - } - - // reset the execContext for each new row - execContext.resetRow(); - - try { - BytesWritable keyWritable = (BytesWritable) key; - byte tag = 0; - - if (isTagged) { - // remove the tag from key coming out of reducer - // and store it in separate variable.
- int size = keyWritable.getLength() - 1; - tag = keyWritable.getBytes()[size]; - keyWritable.setSize(size); - } - - //Set the key, check if this is a new group or same group - if (!keyWritable.equals(this.groupKey)) { - // If a operator wants to do some work at the beginning of a group - if (groupKey == null) { // the first group - this.groupKey = new BytesWritable(); - } else { - // If a operator wants to do some work at the end of a group - if(isLogTraceEnabled) { - l4j.trace("End Group"); - } - reducer.endGroup(); - } - - try { - this.keyObject = inputKeyDeserializer.deserialize(keyWritable); - } catch (Exception e) { - throw new HiveException( - "Hive Runtime Error: Unable to deserialize reduce input key from " - + Utilities.formatBinaryString(keyWritable.getBytes(), 0, - keyWritable.getLength()) + " with properties " - + keyTableDesc.getProperties(), e); - } - groupKey.set(keyWritable.getBytes(), 0, keyWritable.getLength()); - if (isLogTraceEnabled) { - l4j.trace("Start Group"); - } - reducer.setGroupKeyObject(keyObject); - reducer.startGroup(); - } - /* this.keyObject passed via reference */ - if(vectorized) { - return processVectors(values, tag); - } else { - return processKeyValues(values, tag); - } - } catch (Throwable e) { - abort = true; - if (e instanceof OutOfMemoryError) { - // Don't create a new object if we are already out of memory - throw (OutOfMemoryError) e; - } else { - l4j.fatal(StringUtils.stringifyException(e)); - throw new RuntimeException(e); - } - } - } - - private Object deserializeValue(BytesWritable valueWritable, byte tag) throws HiveException { - try { - return inputValueDeserializer[tag].deserialize(valueWritable); - } catch (SerDeException e) { - throw new HiveException( - "Hive Runtime Error: Unable to deserialize reduce input value (tag=" - + tag - + ") from " - + Utilities.formatBinaryString(valueWritable.getBytes(), 0, - valueWritable.getLength()) + " with properties " - + valueTableDesc[tag].getProperties(), e); - } - } - - /** - * @param values - * @return true if it is not done and can take more inputs - */ - private boolean processKeyValues(Iterable values, byte tag) throws HiveException { - - for (Object value : values) { - BytesWritable valueWritable = (BytesWritable) value; - - row.clear(); - row.add(this.keyObject); - row.add(deserializeValue(valueWritable, tag)); - - try { - reducer.processOp(row, tag); - } catch (Exception e) { - String rowString = null; - try { - rowString = SerDeUtils.getJSONString(row, rowObjectInspector[tag]); - } catch (Exception e2) { - rowString = "[Error getting row data with exception " - + StringUtils.stringifyException(e2) + " ]"; - } - throw new HiveException("Hive Runtime Error while processing row (tag=" - + tag + ") " + rowString, e); - } - if (isLogInfoEnabled) { - logProgress(); - } - } - return true; //give me more - } - - /** - * @param values - * @return true if it is not done and can take more inputs - */ - private boolean processVectors(Iterable values, byte tag) throws HiveException { - VectorizedRowBatch batch = batches[tag]; - batch.reset(); - - /* deserialize key into columns */ - VectorizedBatchUtil.addRowToBatchFrom(keyObject, keyStructInspector, - 0, 0, batch, buffer); - for(int i = 0; i < keysColumnOffset; i++) { - VectorizedBatchUtil.setRepeatingColumn(batch, i); - } - - int rowIdx = 0; - try { - for (Object value : values) { - /* deserialize value into columns */ - BytesWritable valueWritable = (BytesWritable) value; - Object valueObj = deserializeValue(valueWritable, tag); - - 
VectorizedBatchUtil.addRowToBatchFrom(valueObj, valueStructInspectors[tag], - rowIdx, keysColumnOffset, batch, buffer); - rowIdx++; - if (rowIdx >= BATCH_SIZE) { - VectorizedBatchUtil.setBatchSize(batch, rowIdx); - reducer.processOp(batch, tag); - rowIdx = 0; - if (isLogInfoEnabled) { - logProgress(); - } - } - } - if (rowIdx > 0) { - VectorizedBatchUtil.setBatchSize(batch, rowIdx); - reducer.processOp(batch, tag); - } - if (isLogInfoEnabled) { - logProgress(); - } - } catch (Exception e) { - String rowString = null; - try { - /* batch.toString depends on this */ - batch.setValueWriters(valueStringWriters[tag] - .toArray(new VectorExpressionWriter[0])); - rowString = batch.toString(); - } catch (Exception e2) { - rowString = "[Error getting row data with exception " - + StringUtils.stringifyException(e2) + " ]"; - } - throw new HiveException("Hive Runtime Error while processing vector batch (tag=" - + tag + ") " + rowString, e); - } - return true; // give me more - } - @Override void close(){ - // check if there are IOExceptions - if (!abort) { - abort = execContext.getIoCxt().getIOExceptions(); - } - try { - if (groupKey != null) { - // If a operator wants to do some work at the end of a group - if(isLogTraceEnabled) { - l4j.trace("End Group"); - } - reducer.endGroup(); - } - if (isLogInfoEnabled) { - logCloseInfo(); + for (ReduceRecordSource rs: sources) { + // rs.close() must run for every source (it fires the final endGroup), + // so evaluate it before combining it with the running abort flag. + abort = rs.close() && abort; } reducer.close(abort); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java new file mode 100644 index 0000000..017a72a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordSource.java @@ -0,0 +1,401 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.ql.exec.tez; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedBatchUtil; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch; +import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory; +import org.apache.hadoop.hive.ql.log.PerfLogger; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.serde2.SerDe; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.SerDeUtils; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.io.BytesWritable; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.tez.runtime.library.api.KeyValuesReader; + +/** + * Process input from tez LogicalInput and write output - for a reduce plan. + * Just pump the records through the query plan.
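+ * Each instance serves a single shuffled input (one tag): it deserializes the + * grouped (key, values) records from its KeyValuesReader and pushes the rows, + * or vectorized row batches, into the reduce operator via pushRecord().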
+ */ +@SuppressWarnings("deprecation") +public class ReduceRecordSource implements RecordSource { + + public static final Log l4j = LogFactory.getLog(ReduceRecordSource.class); + + private static final String CLASS_NAME = ReduceRecordSource.class.getName(); + + private byte tag; + + private boolean abort = false; + + // instance field (not static): each source must own its key deserializer + private Deserializer inputKeyDeserializer; + + // Each tag gets its own ReduceRecordSource, so a single value SerDe per + // source is sufficient. + private SerDe inputValueDeserializer; + + TableDesc keyTableDesc; + TableDesc valueTableDesc; + + ObjectInspector rowObjectInspector; + private Operator reducer; + + private Object keyObject = null; + private BytesWritable groupKey; + + private boolean vectorized = false; + + List row = new ArrayList(Utilities.reduceFieldNameList.size()); + + private DataOutputBuffer keyBuffer; + private DataOutputBuffer valueBuffer; + private VectorizedRowBatchCtx batchContext; + private VectorizedRowBatch batch; + + // number of columns pertaining to keys in a vectorized row batch + private int keysColumnOffset; + private final int BATCH_SIZE = VectorizedRowBatch.DEFAULT_SIZE; + + private StructObjectInspector keyStructInspector; + private StructObjectInspector valueStructInspectors; + + /* this is only used in the error code path */ + private List valueStringWriters; + + private KeyValuesReader reader; + + private boolean handleGroupKey; + + private ObjectInspector valueObjectInspector; + + private final PerfLogger perfLogger = PerfLogger.getPerfLogger(); + + private Iterable valueWritables; + + private final boolean grouped = true; + + void init(JobConf jconf, Operator reducer, boolean vectorized, TableDesc keyTableDesc, + TableDesc valueTableDesc, KeyValuesReader reader, boolean handleGroupKey, byte tag, + Map> scratchColumnVectorTypes) + throws Exception { + + ObjectInspector keyObjectInspector; + + this.reducer = reducer; + this.vectorized = vectorized; + this.keyTableDesc = keyTableDesc; + this.reader = reader; + this.handleGroupKey = handleGroupKey; + this.tag = tag; + + try { + inputKeyDeserializer = ReflectionUtils.newInstance(keyTableDesc + .getDeserializerClass(), null); + SerDeUtils.initializeSerDe(inputKeyDeserializer, null, keyTableDesc.getProperties(), null); + keyObjectInspector = inputKeyDeserializer.getObjectInspector(); + + if(vectorized) { + keyStructInspector = (StructObjectInspector) keyObjectInspector; + keysColumnOffset = keyStructInspector.getAllStructFieldRefs().size(); + keyBuffer = new DataOutputBuffer(); + valueBuffer = new DataOutputBuffer(); + } + + // We should initialize the SerDe with the TypeInfo when available.
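+ // Instantiate and initialize the value SerDe for this source's tag; its + // object inspector, combined with the key inspector, defines the row shape + // handed to the reduce operator.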
+ this.valueTableDesc = valueTableDesc; + inputValueDeserializer = (SerDe) ReflectionUtils.newInstance( + valueTableDesc.getDeserializerClass(), null); + SerDeUtils.initializeSerDe(inputValueDeserializer, null, + valueTableDesc.getProperties(), null); + valueObjectInspector = inputValueDeserializer.getObjectInspector(); + + ArrayList ois = new ArrayList(); + + if(vectorized) { + /* vectorization only works with struct object inspectors */ + valueStructInspectors = (StructObjectInspector) valueObjectInspector; + + final int totalColumns = keysColumnOffset + + valueStructInspectors.getAllStructFieldRefs().size(); + valueStringWriters = new ArrayList(totalColumns); + valueStringWriters.addAll(Arrays + .asList(VectorExpressionWriterFactory + .genVectorStructExpressionWritables(keyStructInspector))); + valueStringWriters.addAll(Arrays + .asList(VectorExpressionWriterFactory + .genVectorStructExpressionWritables(valueStructInspectors))); + + /* + * The row object inspector used by ReduceWork needs to be a **standard** + * struct object inspector, not just any struct object inspector. + */ + ArrayList colNames = new ArrayList(); + List fields = keyStructInspector.getAllStructFieldRefs(); + for (StructField field: fields) { + colNames.add(Utilities.ReduceField.KEY.toString() + "." + field.getFieldName()); + ois.add(field.getFieldObjectInspector()); + } + fields = valueStructInspectors.getAllStructFieldRefs(); + for (StructField field: fields) { + colNames.add(Utilities.ReduceField.VALUE.toString() + "." + field.getFieldName()); + ois.add(field.getFieldObjectInspector()); + } + rowObjectInspector = ObjectInspectorFactory.getStandardStructObjectInspector(colNames, ois); + + Map reduceShuffleScratchColumnTypeMap = + scratchColumnVectorTypes.get("_REDUCE_SHUFFLE_"); + batchContext = new VectorizedRowBatchCtx(); + batchContext.init(reduceShuffleScratchColumnTypeMap, (StructObjectInspector) rowObjectInspector); + batch = batchContext.createVectorizedRowBatch(); + } else { + ois.add(keyObjectInspector); + ois.add(valueObjectInspector); + rowObjectInspector = + ObjectInspectorFactory.getStandardStructObjectInspector(Utilities.reduceFieldNameList, + ois); + } + } catch (Throwable e) { + abort = true; + if (e instanceof OutOfMemoryError) { + // Don't create a new object if we are already out of memory + throw (OutOfMemoryError) e; + } else { + throw new RuntimeException("Reduce operator initialization failed", e); + } + } + perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INIT_OPERATORS); + } + + @Override + public final boolean isGrouped() { + return grouped; + } + + @Override + public boolean pushRecord() throws HiveException { + BytesWritable keyWritable; + + try { + if (!reader.next()) { + return false; + } else { + keyWritable = (BytesWritable) reader.getCurrentKey(); + valueWritables = reader.getCurrentValues(); + } + + //Set the key, check if this is a new group or same group + try { + keyObject = inputKeyDeserializer.deserialize(keyWritable); + } catch (Exception e) { + throw new HiveException("Hive Runtime Error: Unable to deserialize reduce input key from " + + Utilities.formatBinaryString(keyWritable.getBytes(), 0, keyWritable.getLength()) + + " with properties " + keyTableDesc.getProperties(), e); + } + + if (handleGroupKey && !keyWritable.equals(this.groupKey)) { + // If a operator wants to do some work at the beginning of a group + if (groupKey == null) { // the first group + this.groupKey = new BytesWritable(); + } else { + // If a operator wants to do some work at the end of a group + 
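// The previous group is complete: close it with endGroup() before + // startGroup() runs for the new key below. +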
reducer.endGroup(); + } + + groupKey.set(keyWritable.getBytes(), 0, keyWritable.getLength()); + reducer.startGroup(); + reducer.setGroupKeyObject(keyObject); + } + + /* this.keyObject passed via reference */ + if(vectorized) { + processVectors(valueWritables, tag); + } else { + processKeyValues(valueWritables, tag); + } + return true; + } catch (Throwable e) { + abort = true; + if (e instanceof OutOfMemoryError) { + // Don't create a new object if we are already out of memory + throw (OutOfMemoryError) e; + } else { + l4j.fatal(StringUtils.stringifyException(e)); + throw new RuntimeException(e); + } + } + } + + private Object deserializeValue(BytesWritable valueWritable, byte tag) + throws HiveException { + + try { + return inputValueDeserializer.deserialize(valueWritable); + } catch (SerDeException e) { + throw new HiveException( + "Hive Runtime Error: Unable to deserialize reduce input value (tag=" + + tag + + ") from " + + Utilities.formatBinaryString(valueWritable.getBytes(), 0, valueWritable.getLength()) + + " with properties " + valueTableDesc.getProperties(), e); + } + } + + /** + * Pushes each (key, value) row of the current group into the reduce operator. + * @param values the values associated with the current key + */ + private void processKeyValues(Iterable values, byte tag) throws HiveException { + List passDownKey = null; + for (Object value : values) { + BytesWritable valueWritable = (BytesWritable) value; + + row.clear(); + if (passDownKey == null) { + row.add(this.keyObject); + } else { + row.add(passDownKey.get(0)); + } + if ((passDownKey == null) && (reducer instanceof CommonMergeJoinOperator)) { + passDownKey = + (List) ObjectInspectorUtils.copyToStandardObject(row, + reducer.getInputObjInspectors()[tag], ObjectInspectorCopyOption.WRITABLE); + row.remove(0); + row.add(0, passDownKey.get(0)); + } + + row.add(deserializeValue(valueWritable, tag)); + + try { + reducer.processOp(row, tag); + } catch (Exception e) { + String rowString = null; + try { + rowString = SerDeUtils.getJSONString(row, rowObjectInspector); + } catch (Exception e2) { + rowString = "[Error getting row data with exception " + + StringUtils.stringifyException(e2) + " ]"; + } + throw new HiveException("Hive Runtime Error while processing row (tag=" + + tag + ") " + rowString, e); + } + } + } + + /** + * Packs the current group into vectorized row batches and pushes them into + * the reduce operator. + * @param values the values associated with the current key + */ + private void processVectors(Iterable values, byte tag) throws HiveException { + /* deserialize key into columns */ + VectorizedBatchUtil.addRowToBatchFrom(keyObject, keyStructInspector, + 0, 0, batch, keyBuffer); + for(int i = 0; i < keysColumnOffset; i++) { + VectorizedBatchUtil.setRepeatingColumn(batch, i); + } + + int rowIdx = 0; + try { + for (Object value : values) { + /* deserialize value into columns */ + BytesWritable valueWritable = (BytesWritable) value; + Object valueObj = deserializeValue(valueWritable, tag); + + VectorizedBatchUtil.addRowToBatchFrom(valueObj, valueStructInspectors, + rowIdx, keysColumnOffset, batch, valueBuffer); + rowIdx++; + if (rowIdx >= BATCH_SIZE) { + VectorizedBatchUtil.setBatchSize(batch, rowIdx); + reducer.processOp(batch, tag); + + // Reset just the value columns and value buffer. + for (int i = keysColumnOffset; i < batch.numCols; i++) { + batch.cols[i].reset(); + } + valueBuffer.reset(); + rowIdx = 0; + } + } + if (rowIdx > 0) { + // Flush final partial batch.
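+ // setBatchSize records how many rows of the batch are valid before the + // partial batch is handed to the operator tree.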
+ VectorizedBatchUtil.setBatchSize(batch, rowIdx); + reducer.processOp(batch, tag); + } + batch.reset(); + keyBuffer.reset(); + valueBuffer.reset(); + } catch (Exception e) { + String rowString = null; + try { + /* batch.toString depends on this */ + batch.setValueWriters(valueStringWriters + .toArray(new VectorExpressionWriter[0])); + rowString = batch.toString(); + } catch (Exception e2) { + rowString = "[Error getting row data with exception " + + StringUtils.stringifyException(e2) + " ]"; + } + throw new HiveException("Hive Runtime Error while processing vector batch (tag=" + + tag + ") " + rowString, e); + } + } + + boolean close() throws Exception { + try { + if (handleGroupKey && groupKey != null) { + // If a operator wants to do some work at the end of a group + reducer.endGroup(); + } + } catch (Exception e) { + if (!abort) { + // signal new failure to map-reduce + l4j.error("Hit error while closing operators - failing tree"); + throw new RuntimeException("Hive Runtime Error while closing operators: " + + e.getMessage(), e); + } + } + return abort; + } + + public ObjectInspector getObjectInspector() { + return rowObjectInspector; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezContext.java index 010a6f4..62f1aa4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezContext.java @@ -37,6 +37,8 @@ private ProcessorContext processorContext; + private RecordSource[] sources; + public TezContext(boolean isMap, JobConf jobConf) { super(isMap, jobConf); } @@ -70,4 +72,12 @@ public void setTezProcessorContext(ProcessorContext processorContext) { public ProcessorContext getTezProcessorContext() { return processorContext; } + + public RecordSource[] getRecordSources() { + return sources; + } + + public void setRecordSources(RecordSource[] sources) { + this.sources = sources; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java index 29f6bfa..48c6b6a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java @@ -78,7 +78,7 @@ public void run() { try { for (TezSessionState s: TezSessionState.getOpenSessions()) { System.err.println("Shutting down tez session."); - TezSessionPoolManager.getInstance().close(s); + TezSessionPoolManager.getInstance().close(s, false); } } catch (Exception e) { // ignore @@ -113,6 +113,7 @@ public int monitorExecution(final DAGClient dagClient, HiveTxnManager txnMgr, String lastReport = null; Set opts = new HashSet(); Heartbeater heartbeater = new Heartbeater(txnMgr, conf); + long startTime = 0; shutdownList.add(dagClient); @@ -145,6 +146,7 @@ public int monitorExecution(final DAGClient dagClient, HiveTxnManager txnMgr, for (String s: progressMap.keySet()) { perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_RUN_VERTEX + s); } + startTime = System.currentTimeMillis(); running = true; } @@ -152,7 +154,8 @@ public int monitorExecution(final DAGClient dagClient, HiveTxnManager txnMgr, break; case SUCCEEDED: lastReport = printStatus(progressMap, lastReport, console); - console.printInfo("Status: Finished successfully"); + double duration = (System.currentTimeMillis() - startTime)/1000.0; + console.printInfo("Status: Finished successfully in " + String.format("%.2f seconds", duration)); running = false; done = true; 
break; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java index 1268086..42c7d37 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezProcessor.java @@ -17,6 +17,14 @@ */ package org.apache.hadoop.hive.ql.exec.tez; +import java.io.IOException; +import java.text.NumberFormat; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -26,6 +34,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.tez.common.TezUtils; import org.apache.tez.mapreduce.input.MRInputLegacy; +import org.apache.tez.mapreduce.input.MultiMRInput; import org.apache.tez.mapreduce.processor.MRTaskReporter; import org.apache.tez.runtime.api.AbstractLogicalIOProcessor; import org.apache.tez.runtime.api.Event; @@ -34,11 +43,6 @@ import org.apache.tez.runtime.api.ProcessorContext; import org.apache.tez.runtime.library.api.KeyValueWriter; -import java.io.IOException; -import java.text.NumberFormat; -import java.util.List; -import java.util.Map; - /** * Hive processor for Tez that forms the vertices in Tez and processes the data. * Does what ExecMapper and ExecReducer does for hive in MR framework. @@ -90,7 +94,8 @@ public void initialize() throws IOException { perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_INITIALIZE_PROCESSOR); Configuration conf = TezUtils.createConfFromUserPayload(getContext().getUserPayload()); this.jobConf = new JobConf(conf); - setupMRLegacyConfigs(getContext()); + this.processorContext = getContext(); + setupMRLegacyConfigs(processorContext); perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_INITIALIZE_PROCESSOR); } @@ -130,12 +135,6 @@ public void run(Map inputs, Map out if (isMap) { rproc = new MapRecordProcessor(jobConf); - MRInputLegacy mrInput = getMRInput(inputs); - try { - mrInput.init(); - } catch (IOException e) { - throw new RuntimeException("Failed while initializing MRInput", e); - } } else { rproc = new ReduceRecordProcessor(); } @@ -148,6 +147,7 @@ protected void initializeAndRunProcessor(Map inputs, throws Exception { Throwable originalThrowable = null; try { + // Outputs will be started later by the individual Processors. TezCacheAccess cacheAccess = TezCacheAccess.createInstance(jobConf); // Start the actual Inputs. After MRInput initialization. for (Map.Entry inputEntry : inputs.entrySet()) { @@ -155,13 +155,10 @@ protected void initializeAndRunProcessor(Map inputs, LOG.info("Input: " + inputEntry.getKey() + " is not cached"); inputEntry.getValue().start(); } else { - LOG.info("Input: " + inputEntry.getKey() + - " is already cached. Skipping start"); + LOG.info("Input: " + inputEntry.getKey() + " is already cached. Skipping start"); } } - // Outputs will be started later by the individual Processors. 
- MRTaskReporter mrReporter = new MRTaskReporter(getContext()); rproc.init(jobConf, getContext(), mrReporter, inputs, outputs); rproc.run(); @@ -214,19 +211,4 @@ public void collect(Object key, Object value) throws IOException { writer.write(key, value); } } - - static MRInputLegacy getMRInput(Map inputs) { - //there should be only one MRInput - MRInputLegacy theMRInput = null; - for(LogicalInput inp : inputs.values()){ - if(inp instanceof MRInputLegacy){ - if(theMRInput != null){ - throw new IllegalArgumentException("Only one MRInput is expected"); - } - //a better logic would be to find the alias - theMRInput = (MRInputLegacy)inp; - } - } - return theMRInput; - } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java index 0d0ac41..a4fd36d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java @@ -168,10 +168,10 @@ public void returnSession(TezSessionState tezSessionState) // session in the SessionState } - public void close(TezSessionState tezSessionState) throws Exception { + public void close(TezSessionState tezSessionState, boolean keepTmpDir) throws Exception { LOG.info("Closing tez session default? " + tezSessionState.isDefault()); if (!tezSessionState.isDefault()) { - tezSessionState.close(false); + tezSessionState.close(keepTmpDir); } } @@ -262,19 +262,24 @@ public TezSessionState getSession(TezSessionState session, HiveConf conf, } if (session != null) { - close(session); + close(session, false); } return getSession(conf, doOpen, forceCreate); } - public void closeAndOpen(TezSessionState sessionState, HiveConf conf) + public void closeAndOpen(TezSessionState sessionState, HiveConf conf, boolean keepTmpDir) throws Exception { + closeAndOpen(sessionState, conf, null, keepTmpDir); + } + + public void closeAndOpen(TezSessionState sessionState, HiveConf conf, + String[] additionalFiles, boolean keepTmpDir) throws Exception { HiveConf sessionConf = sessionState.getConf(); if (sessionConf != null && sessionConf.get("tez.queue.name") != null) { conf.set("tez.queue.name", sessionConf.get("tez.queue.name")); } - close(sessionState); - sessionState.open(conf); + close(sessionState, keepTmpDir); + sessionState.open(conf, additionalFiles); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java index 428e0ff..93e0fac 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.MapWork; +import org.apache.hadoop.hive.ql.plan.MergeJoinWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.ReduceWork; import org.apache.hadoop.hive.ql.plan.TezEdgeProperty; @@ -55,6 +56,7 @@ import org.apache.tez.dag.api.Edge; import org.apache.tez.dag.api.GroupInputEdge; import org.apache.tez.dag.api.SessionNotRunning; +import org.apache.tez.dag.api.TezUncheckedException; import org.apache.tez.dag.api.Vertex; import org.apache.tez.dag.api.VertexGroup; import org.apache.tez.dag.api.client.DAGClient; @@ -124,14 +126,11 @@ public int execute(DriverContext driverContext) { // create the tez tmp dir scratchDir = utils.createTezDir(scratchDir, conf); - 
if (!session.isOpen()) { - // can happen if the user sets the tez flag after the session was - // established - LOG.info("Tez session hasn't been created yet. Opening session"); - session.open(conf, inputOutputJars); - } else { - session.refreshLocalResourcesFromConf(conf); - } + Map inputOutputLocalResources = + getExtraLocalResources(jobConf, scratchDir, inputOutputJars); + + // Ensure the session is open and has the necessary local resources + updateSession(session, jobConf, scratchDir, inputOutputJars, inputOutputLocalResources); List additionalLr = session.getLocalizedResources(); @@ -153,8 +152,12 @@ public int execute(DriverContext driverContext) { // next we translate the TezWork to a Tez DAG DAG dag = build(jobConf, work, scratchDir, appJarLr, additionalLr, ctx); + // Add the extra resources to the dag + addExtraResourcesToDag(session, dag, inputOutputJars, inputOutputLocalResources); + // submit will send the job to the cluster and start executing - client = submit(jobConf, dag, scratchDir, appJarLr, session, additionalLr); + client = submit(jobConf, dag, scratchDir, appJarLr, session, + additionalLr, inputOutputJars, inputOutputLocalResources); // finally monitor will print progress until the job is done TezJobMonitor monitor = new TezJobMonitor(); @@ -195,6 +198,63 @@ public int execute(DriverContext driverContext) { return rc; } + /** + * Converts the list of jars into local resources. + */ + Map getExtraLocalResources(JobConf jobConf, Path scratchDir, + String[] inputOutputJars) throws Exception { + final Map resources = new HashMap(); + final List localResources = utils.localizeTempFiles( + scratchDir.toString(), jobConf, inputOutputJars); + if (null != localResources) { + for (LocalResource lr : localResources) { + resources.put(utils.getBaseName(lr), lr); + } + } + return resources; + } + + /** + * Ensures that the Tez Session is open and the AM has all necessary jars configured. + */ + void updateSession(TezSessionState session, + JobConf jobConf, Path scratchDir, String[] inputOutputJars, + Map extraResources) throws Exception { + final boolean missingLocalResources = !session + .hasResources(inputOutputJars); + + if (!session.isOpen()) { + // can happen if the user sets the tez flag after the session was + // established + LOG.info("Tez session hasn't been created yet. Opening session"); + session.open(conf, inputOutputJars); + } else { + LOG.info("Session is already open"); + + // Ensure the open session has the necessary resources (StorageHandler) + if (missingLocalResources) { + LOG.info("Tez session missing resources," + + " adding additional necessary resources"); + session.getSession().addAppMasterLocalFiles(extraResources); + } + + session.refreshLocalResourcesFromConf(conf); + } + } + + /** + * Adds any necessary resources that must be localized in each vertex to the DAG.
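+ * This is only needed when the session AM does not already have the jars + * (see hasResources() in updateSession above); otherwise the DAG is left as is.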
+ */ + void addExtraResourcesToDag(TezSessionState session, DAG dag, + String[] inputOutputJars, + Map inputOutputLocalResources) throws Exception { + if (!session.hasResources(inputOutputJars)) { + if (null != inputOutputLocalResources) { + dag.addTaskLocalFiles(inputOutputLocalResources); + } + } + } + DAG build(JobConf conf, TezWork work, Path scratchDir, LocalResource appJarLr, List additionalLr, Context ctx) throws Exception { @@ -254,15 +314,16 @@ DAG build(JobConf conf, TezWork work, Path scratchDir, for (BaseWork v: children) { // finally we can create the grouped edge GroupInputEdge e = utils.createEdge(group, parentConf, - workToVertex.get(v), work.getEdgeProperty(w, v)); + workToVertex.get(v), work.getEdgeProperty(w, v), work.getVertexType(v)); dag.addEdge(e); } } else { // Regular vertices JobConf wxConf = utils.initializeVertexConf(conf, ctx, w); - Vertex wx = utils.createVertex(wxConf, w, scratchDir, appJarLr, - additionalLr, fs, ctx, !isFinal, work); + Vertex wx = + utils.createVertex(wxConf, w, scratchDir, appJarLr, additionalLr, fs, ctx, !isFinal, + work, work.getVertexType(w)); dag.addVertex(wx); utils.addCredentials(w, dag); perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.TEZ_CREATE_VERTEX + w.getName()); @@ -276,7 +337,7 @@ DAG build(JobConf conf, TezWork work, Path scratchDir, TezEdgeProperty edgeProp = work.getEdgeProperty(w, v); - e = utils.createEdge(wxConf, wx, workToVertex.get(v), edgeProp); + e = utils.createEdge(wxConf, wx, workToVertex.get(v), edgeProp, work.getVertexType(v)); dag.addEdge(e); } } @@ -287,7 +348,8 @@ DAG build(JobConf conf, TezWork work, Path scratchDir, DAGClient submit(JobConf conf, DAG dag, Path scratchDir, LocalResource appJarLr, TezSessionState sessionState, - List additionalLr) + List additionalLr, String[] inputOutputJars, + Map inputOutputLocalResources) throws Exception { perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.TEZ_SUBMIT_DAG); @@ -308,7 +370,7 @@ DAGClient submit(JobConf conf, DAG dag, Path scratchDir, console.printInfo("Tez session was closed. Reopening..."); // close the old one, but keep the tmp files around - TezSessionPoolManager.getInstance().closeAndOpen(sessionState, this.conf); + TezSessionPoolManager.getInstance().closeAndOpen(sessionState, this.conf, inputOutputJars, true); console.printInfo("Session re-established."); dagClient = sessionState.getSession().submitDAG(dag); @@ -326,6 +388,9 @@ int close(TezWork work, int rc) { try { List ws = work.getAllWork(); for (BaseWork w: ws) { + if (w instanceof MergeJoinWork) { + w = ((MergeJoinWork) w).getMainWork(); + } for (Operator op: w.getAllOperators()) { op.jobClose(conf, rc == 0); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/InputMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/InputMerger.java index a977319..e69de29 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/InputMerger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/InputMerger.java @@ -1,109 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.ql.exec.tez.tools; - -import java.io.IOException; -import java.util.Comparator; -import java.util.List; -import java.util.PriorityQueue; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.tez.ReduceRecordProcessor; -import org.apache.hadoop.io.BinaryComparable; -import org.apache.tez.runtime.api.Input; -import org.apache.tez.runtime.api.LogicalInput; -import org.apache.tez.runtime.library.api.KeyValuesReader; - -/** - * A KeyValuesReader implementation that returns a sorted stream of key-values - * by doing a sorted merge of the key-value in LogicalInputs. - * Tags are in the last byte of the key, so no special handling for tags is required. - * Uses a priority queue to pick the KeyValuesReader of the input that is next in - * sort order. - */ -public class InputMerger extends KeyValuesReader { - - public static final Log l4j = LogFactory.getLog(ReduceRecordProcessor.class); - private PriorityQueue pQueue = null; - private KeyValuesReader nextKVReader = null; - - public InputMerger(List shuffleInputs) throws Exception { - //get KeyValuesReaders from the LogicalInput and add them to priority queue - int initialCapacity = shuffleInputs.size(); - pQueue = new PriorityQueue(initialCapacity, new KVReaderComparator()); - for(Input input : shuffleInputs){ - addToQueue((KeyValuesReader)input.getReader()); - } - } - - /** - * Add KeyValuesReader to queue if it has more key-values - * @param kvsReadr - * @throws IOException - */ - private void addToQueue(KeyValuesReader kvsReadr) throws IOException{ - if(kvsReadr.next()){ - pQueue.add(kvsReadr); - } - } - - /** - * @return true if there are more key-values and advances to next key-values - * @throws IOException - */ - public boolean next() throws IOException { - //add the previous nextKVReader back to queue - if(nextKVReader != null){ - addToQueue(nextKVReader); - } - - //get the new nextKVReader with lowest key - nextKVReader = pQueue.poll(); - return nextKVReader != null; - } - - public Object getCurrentKey() throws IOException { - return nextKVReader.getCurrentKey(); - } - - public Iterable getCurrentValues() throws IOException { - return nextKVReader.getCurrentValues(); - } - - /** - * Comparator that compares KeyValuesReader on their current key - */ - class KVReaderComparator implements Comparator { - - @Override - public int compare(KeyValuesReader kvReadr1, KeyValuesReader kvReadr2) { - try { - BinaryComparable key1 = (BinaryComparable) kvReadr1.getCurrentKey(); - BinaryComparable key2 = (BinaryComparable) kvReadr2.getCurrentKey(); - return key1.compareTo(key2); - } catch (IOException e) { - l4j.error("Caught exception while reading shuffle input", e); - //die! 
- throw new RuntimeException(e); - } - } - } - - -} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValueInputMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValueInputMerger.java new file mode 100644 index 0000000..516722d --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValueInputMerger.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec.tez.tools; + +import java.io.IOException; +import java.util.Comparator; +import java.util.List; +import java.util.PriorityQueue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.BinaryComparable; +import org.apache.tez.runtime.library.api.KeyValueReader; + +/** + * A KeyValueReader implementation that returns a sorted stream of key-value + * pairs by doing a sorted merge of the underlying KeyValueReaders. + * Tags are in the last byte of the key, so no special handling for tags is required. + * Uses a priority queue to pick the KeyValueReader of the input that is next in + * sort order.
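+ * The readers come from the KeyValueReaders of MultiMRInputs (one per sorted + * file), as the constructor below expects.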
+ */ +public class KeyValueInputMerger extends KeyValueReader { + + public static final Log l4j = LogFactory.getLog(KeyValueInputMerger.class); + private PriorityQueue pQueue = null; + private KeyValueReader nextKVReader = null; + + public KeyValueInputMerger(List multiMRInputs) throws Exception { + //get KeyValueReaders from the MultiMRInputs and add them to the priority queue + int initialCapacity = multiMRInputs.size(); + pQueue = new PriorityQueue(initialCapacity, new KVReaderComparator()); + l4j.info("Initialized the priority queue with multi mr inputs: " + multiMRInputs.size()); + for (KeyValueReader input : multiMRInputs) { + addToQueue(input); + } + } + + /** + * Add KeyValueReader to queue if it has more key-value pairs + * + * @param kvReader + * @throws IOException + */ + private void addToQueue(KeyValueReader kvReader) throws IOException { + if (kvReader.next()) { + pQueue.add(kvReader); + } + } + + /** + * @return true if there are more key-values and advances to next key-values + * @throws IOException + */ + @Override + public boolean next() throws IOException { + //add the previous nextKVReader back to queue + if(nextKVReader != null){ + addToQueue(nextKVReader); + } + + //get the new nextKVReader with lowest key + nextKVReader = pQueue.poll(); + return nextKVReader != null; + } + + @Override + public Object getCurrentKey() throws IOException { + return nextKVReader.getCurrentKey(); + } + + @Override + public Object getCurrentValue() throws IOException { + return nextKVReader.getCurrentValue(); + } + + /** + * Comparator that orders KeyValueReaders by their current value; for these + * merged file inputs the record value carries the sort key. + */ + class KVReaderComparator implements Comparator { + + @Override + public int compare(KeyValueReader kvReadr1, KeyValueReader kvReadr2) { + try { + BinaryComparable value1 = (BinaryComparable) kvReadr1.getCurrentValue(); + BinaryComparable value2 = (BinaryComparable) kvReadr2.getCurrentValue(); + return value1.compareTo(value2); + } catch (IOException e) { + l4j.error("Caught exception while reading shuffle input", e); + //die! + throw new RuntimeException(e); + } + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValuesInputMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValuesInputMerger.java new file mode 100644 index 0000000..9bc6418 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/KeyValuesInputMerger.java @@ -0,0 +1,208 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.ql.exec.tez.tools; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.Iterator; +import java.util.List; +import java.util.PriorityQueue; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.io.BinaryComparable; +import org.apache.tez.runtime.api.Input; +import org.apache.tez.runtime.library.api.KeyValuesReader; + +/** + * A KeyValuesReader implementation that returns a sorted stream of key-values + * by doing a sorted merge of the key-value in LogicalInputs. + * Tags are in the last byte of the key, so no special handling for tags is required. + * Uses a priority queue to pick the KeyValuesReader of the input that is next in + * sort order. + */ +public class KeyValuesInputMerger extends KeyValuesReader { + + private class KeyValuesIterable implements Iterable { + + KeyValuesIterator currentIterator = null; + + KeyValuesIterable(int size) { + currentIterator = new KeyValuesIterator(size); + } + + @Override + public Iterator iterator() { + return currentIterator; + } + + public void init(List readerList) { + currentIterator.init(readerList); + } + } + + private class KeyValuesIterator implements Iterator { + KeyValuesReader[] readerArray = null; + Iterator currentIterator = null; + int currentIndex = 0; + int loadedSize = 0; + + KeyValuesIterator(int size) { + readerArray = new KeyValuesReader[size]; + } + + public void init(List readerList) { + for (int i = 0; i < readerList.size(); i++) { + readerArray[i] = null; + } + loadedSize = 0; + for (KeyValuesReader kvsReader : readerList) { + readerArray[loadedSize] = kvsReader; + loadedSize++; + } + currentIterator = null; + currentIndex = 0; + } + + @Override + public boolean hasNext() { + if ((currentIterator == null) || (currentIterator.hasNext() == false)) { + if (currentIndex == loadedSize) { + return false; + } + + try { + if (readerArray[currentIndex] == null) { + return false; + } + currentIterator = readerArray[currentIndex].getCurrentValues().iterator(); + currentIndex++; + return currentIterator.hasNext(); + } catch (IOException e) { + return false; + } + } + + return true; + } + + @Override + public Object next() { + l4j.info("next called on " + currentIterator); + return currentIterator.next(); + } + + @Override + public void remove() { + // nothing to do + } + } + + public static final Log l4j = LogFactory.getLog(KeyValuesInputMerger.class); + private PriorityQueue pQueue = null; + private final List nextKVReaders = new ArrayList(); + KeyValuesIterable kvsIterable = null; + + public KeyValuesInputMerger(List shuffleInputs) throws Exception { + //get KeyValuesReaders from the LogicalInput and add them to priority queue + int initialCapacity = shuffleInputs.size(); + kvsIterable = new KeyValuesIterable(initialCapacity); + pQueue = new PriorityQueue(initialCapacity, new KVReaderComparator()); + for(Input input : shuffleInputs){ + addToQueue((KeyValuesReader)input.getReader()); + } + } + + /** + * Add KeyValuesReader to queue if it has more key-values + * @param kvsReadr + * @throws IOException + */ + private void addToQueue(KeyValuesReader kvsReadr) throws IOException{ + if(kvsReadr.next()){ + pQueue.add(kvsReadr); + } + } + + /** + * @return true if there are more key-values and advances to next key-values + * @throws IOException + */ + @Override + public boolean next() throws IOException { + //add the previous nextKVReader back to queue + if (!nextKVReaders.isEmpty()) { + for 
(KeyValuesReader kvReader : nextKVReaders) { + addToQueue(kvReader); + } + nextKVReaders.clear(); + } + + KeyValuesReader nextKVReader = null; + //get the new nextKVReader with lowest key + nextKVReader = pQueue.poll(); + if (nextKVReader != null) { + nextKVReaders.add(nextKVReader); + } + + while (pQueue.peek() != null) { + KeyValuesReader equalValueKVReader = pQueue.poll(); + if (pQueue.comparator().compare(nextKVReader, equalValueKVReader) == 0) { + nextKVReaders.add(equalValueKVReader); + } else { + pQueue.add(equalValueKVReader); + break; + } + } + return !(nextKVReaders.isEmpty()); + } + + @Override + public Object getCurrentKey() throws IOException { + // return key from any of the readers + return nextKVReaders.get(0).getCurrentKey(); + } + + @Override + public Iterable getCurrentValues() throws IOException { + kvsIterable.init(nextKVReaders); + return kvsIterable; + } + + /** + * Comparator that compares KeyValuesReader on their current key + */ + class KVReaderComparator implements Comparator { + + @Override + public int compare(KeyValuesReader kvReadr1, KeyValuesReader kvReadr2) { + try { + BinaryComparable key1 = (BinaryComparable) kvReadr1.getCurrentKey(); + BinaryComparable key2 = (BinaryComparable) kvReadr2.getCurrentKey(); + return key1.compareTo(key2); + } catch (IOException e) { + l4j.error("Caught exception while reading shuffle input", e); + //die! + throw new RuntimeException(e); + } + } + } + + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/TezMergedLogicalInput.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/TezMergedLogicalInput.java index 9801a0d..277be4c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/TezMergedLogicalInput.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/TezMergedLogicalInput.java @@ -40,7 +40,7 @@ public TezMergedLogicalInput(MergedInputContext context, List inputs) { @Override public Reader getReader() throws Exception { - return new InputMerger(getInputs()); + return new KeyValuesInputMerger(getInputs()); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java new file mode 100644 index 0000000..d05cc23 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/TezMergedLogicalInput.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/TezMergedLogicalInput.java index 9801a0d..277be4c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/TezMergedLogicalInput.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/tools/TezMergedLogicalInput.java @@ -40,7 +40,7 @@ public TezMergedLogicalInput(MergedInputContext context, List inputs) { @Override public Reader getReader() throws Exception { - return new InputMerger(getInputs()); + return new KeyValuesInputMerger(getInputs()); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java new file mode 100644 index 0000000..d05cc23 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorAppMasterEventOperator.java @@ -0,0 +1,128 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec.vector; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; +import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.AppMasterEventDesc; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.SerDeStats; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.io.ObjectWritable; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; + +/** + * App Master Event operator implementation. + **/ +public class VectorAppMasterEventOperator extends AppMasterEventOperator { + + private static final long serialVersionUID = 1L; + + protected transient Object[] singleRow; + + protected transient VectorExpressionWriter[] valueWriters; + + public VectorAppMasterEventOperator(VectorizationContext context, + OperatorDesc conf) { + super(); + this.conf = (AppMasterEventDesc) conf; + } + + public VectorAppMasterEventOperator() { + } + + @Override + public void initializeOp(Configuration hconf) throws HiveException { + super.initializeOp(hconf); + valueWriters = VectorExpressionWriterFactory.getExpressionWriters( + (StructObjectInspector) inputObjInspectors[0]); + singleRow = new Object[valueWriters.length]; + } + + @Override + public void processOp(Object data, int tag) throws HiveException { + + VectorizedRowBatch vrg = (VectorizedRowBatch) data; + + Writable [] records = null; + Writable recordValue = null; + boolean vectorizedSerde = false; + + try { + if (serializer instanceof VectorizedSerde) { + recordValue = ((VectorizedSerde) serializer).serializeVector(vrg, + inputObjInspectors[0]); + records = (Writable[]) ((ObjectWritable) recordValue).get(); + vectorizedSerde = true; + } + } catch (SerDeException e1) { + throw new HiveException(e1); + } + + for (int i = 0; i < vrg.size; i++) { + Writable row = null; + if (vectorizedSerde) { + row = records[i]; + } else { + if (vrg.valueWriters == null) { + vrg.setValueWriters(this.valueWriters); + } + try { + row = serializer.serialize(getRowObject(vrg, i), inputObjInspectors[0]); + } catch (SerDeException ex) { + throw new HiveException(ex); + } + } + try { + row.write(buffer); + if (buffer.getLength() > MAX_SIZE) { + LOG.info("Disabling AM events.
Buffer size too large: " + buffer.getLength()); + hasReachedMaxSize = true; + buffer = null; + } + } catch (Exception e) { + throw new HiveException(e); + } + } + } + + private Object[] getRowObject(VectorizedRowBatch vrg, int rowIndex) + throws HiveException { + int batchIndex = rowIndex; + if (vrg.selectedInUse) { + batchIndex = vrg.selected[rowIndex]; + } + for (int i = 0; i < vrg.projectionSize; i++) { + ColumnVector vectorColumn = vrg.cols[vrg.projectedColumns[i]]; + singleRow[i] = vrg.valueWriters[i].writeValue(vectorColumn, batchIndex); + } + return singleRow; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnAssignFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnAssignFactory.java index e6e9d04..14ef79e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnAssignFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorColumnAssignFactory.java @@ -24,7 +24,9 @@ import java.util.Map; import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveChar; import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.common.type.HiveVarchar; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DateWritable; @@ -404,8 +406,7 @@ public void assignObjectValue(Object val, int destIndex) throws HiveException { public void assignObjectValue(Object val, int destIndex) throws HiveException { if (val == null) { assignNull(destIndex); - } - else { + } else { Text bw = (Text) val; byte[] bytes = bw.getBytes(); assignBytes(bytes, 0, bw.getLength(), destIndex); @@ -413,6 +414,35 @@ public void assignObjectValue(Object val, int destIndex) throws HiveException { } }.init(outputBatch, (BytesColumnVector) destCol); break; + case VARCHAR: + outVCA = new VectorBytesColumnAssign() { + @Override + public void assignObjectValue(Object val, int destIndex) throws HiveException { + if (val == null) { + assignNull(destIndex); + } else { + HiveVarchar hiveVarchar = (HiveVarchar) val; + byte[] bytes = hiveVarchar.getValue().getBytes(); + assignBytes(bytes, 0, bytes.length, destIndex); + } + } + }.init(outputBatch, (BytesColumnVector) destCol); + break; + case CHAR: + outVCA = new VectorBytesColumnAssign() { + @Override + public void assignObjectValue(Object val, int destIndex) throws HiveException { + if (val == null) { + assignNull(destIndex); + } else { + // We store CHAR type stripped of pads. 
+ HiveChar hiveChar = (HiveChar) val; + byte[] bytes = hiveChar.getStrippedValue().getBytes(); + assignBytes(bytes, 0, bytes.length, destIndex); + } + } + }.init(outputBatch, (BytesColumnVector) destCol); + break; default: throw new HiveException("Incompatible Bytes vector column and primitive category " + category); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExpressionDescriptor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExpressionDescriptor.java index ec1b0ed..bb18b32 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExpressionDescriptor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExpressionDescriptor.java @@ -67,6 +67,7 @@ DATE (0x040), TIMESTAMP (0x080), DATETIME_FAMILY (DATE.value | TIMESTAMP.value), + INT_TIMESTAMP_FAMILY (INT_FAMILY.value | TIMESTAMP.value), INT_DATETIME_FAMILY (INT_FAMILY.value | DATETIME_FAMILY.value), STRING_DATETIME_FAMILY (STRING_FAMILY.value | DATETIME_FAMILY.value), ALL_FAMILY (0xFFF); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractOperator.java index b573e3e..1ddcbc6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorExtractOperator.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.exec.vector; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import org.apache.hadoop.conf.Configuration; @@ -45,7 +46,8 @@ private int keyColCount; private int valueColCount; - private transient int [] projectedColumns = null; + private transient VectorizedRowBatch outputBatch; + private transient int remainingColCount; public VectorExtractOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException { @@ -57,26 +59,25 @@ public VectorExtractOperator() { super(); } - private StructObjectInspector makeStandardStructObjectInspector(StructObjectInspector structObjectInspector) { - List fields = structObjectInspector.getAllStructFieldRefs(); + @Override + protected void initializeOp(Configuration hconf) throws HiveException { + StructObjectInspector structInputObjInspector = (StructObjectInspector) inputObjInspectors[0]; + List fields = structInputObjInspector.getAllStructFieldRefs(); ArrayList ois = new ArrayList(); ArrayList colNames = new ArrayList(); - for (StructField field: fields) { - colNames.add(field.getFieldName()); + for (int i = keyColCount; i < fields.size(); i++) { + StructField field = fields.get(i); + String fieldName = field.getFieldName(); + + // Remove "VALUE." prefix. 
+ int dotIndex = fieldName.indexOf("."); + colNames.add(fieldName.substring(dotIndex + 1)); ois.add(field.getFieldObjectInspector()); } - return ObjectInspectorFactory + outputObjInspector = ObjectInspectorFactory .getStandardStructObjectInspector(colNames, ois); - } - - @Override - protected void initializeOp(Configuration hconf) throws HiveException { - outputObjInspector = inputObjInspectors[0]; - LOG.info("VectorExtractOperator class of outputObjInspector is " + outputObjInspector.getClass().getName()); - projectedColumns = new int [valueColCount]; - for (int i = 0; i < valueColCount; i++) { - projectedColumns[i] = keyColCount + i; - } + remainingColCount = fields.size() - keyColCount; + outputBatch = new VectorizedRowBatch(remainingColCount); initializeChildren(hconf); } @@ -86,20 +87,16 @@ public void setKeyAndValueColCounts(int keyColCount, int valueColCount) { } @Override - // Evaluate vectorized batches of rows and forward them. + // Remove the key columns and forward the values (and scratch columns). public void processOp(Object row, int tag) throws HiveException { - VectorizedRowBatch vrg = (VectorizedRowBatch) row; + VectorizedRowBatch inputBatch = (VectorizedRowBatch) row; - // Project away the key columns... - int[] originalProjections = vrg.projectedColumns; - int originalProjectionSize = vrg.projectionSize; - vrg.projectionSize = valueColCount; - vrg.projectedColumns = this.projectedColumns; - - forward(vrg, outputObjInspector); + // Copy references to the input columns array starting after the keys... + for (int i = 0; i < remainingColCount; i++) { + outputBatch.cols[i] = inputBatch.cols[keyColCount + i]; + } + outputBatch.size = inputBatch.size; - // Revert the projected columns back, because vrg will be re-used. - vrg.projectionSize = originalProjectionSize; - vrg.projectedColumns = originalProjections; + forward(outputBatch, outputObjInspector); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java index e546dd1..ea32f33 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFileSinkOperator.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.ql.exec.vector; -import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; @@ -27,16 +25,7 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; -import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.serde2.SerDeStats; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -import org.apache.hadoop.io.ObjectWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.Writable; /** * File Sink operator implementation. 
@@ -69,113 +58,10 @@ protected void initializeOp(Configuration hconf) throws HiveException { @Override public void processOp(Object data, int tag) throws HiveException { - VectorizedRowBatch vrg = (VectorizedRowBatch)data; - - Writable [] records = null; - boolean vectorizedSerde = false; - try { - if (serializer instanceof VectorizedSerde) { - recordValue = ((VectorizedSerde) serializer).serializeVector(vrg, - inputObjInspectors[0]); - records = (Writable[]) ((ObjectWritable) recordValue).get(); - vectorizedSerde = true; - } - } catch (SerDeException e1) { - throw new HiveException(e1); - } - for (int i = 0; i < vrg.size; i++) { - Writable row = null; - if (vectorizedSerde) { - row = records[i]; - } else { - if (vrg.valueWriters == null) { - vrg.setValueWriters(this.valueWriters); - } - try { - row = serializer.serialize(getRowObject(vrg, i), inputObjInspectors[0]); - } catch (SerDeException ex) { - throw new HiveException(ex); - } - } - /* Create list bucketing sub-directory only if stored-as-directories is on. */ - String lbDirName = null; - lbDirName = (lbCtx == null) ? null : generateListBucketingDirName(row); - - FSPaths fpaths; - - if (!bDynParts && !filesCreated) { - if (lbDirName != null) { - FSPaths fsp2 = lookupListBucketingPaths(lbDirName); - } else { - createBucketFiles(fsp); - } - } - - try { - updateProgress(); - - // if DP is enabled, get the final output writers and prepare the real output row - assert inputObjInspectors[0].getCategory() == ObjectInspector.Category.STRUCT : "input object inspector is not struct"; - - if (bDynParts) { - // copy the DP column values from the input row to dpVals - dpVals.clear(); - dpWritables.clear(); - ObjectInspectorUtils.partialCopyToStandardObject(dpWritables, row, dpStartCol, numDynParts, - (StructObjectInspector) inputObjInspectors[0], ObjectInspectorCopyOption.WRITABLE); - // get a set of RecordWriter based on the DP column values - // pass the null value along to the escaping process to determine what the dir should be - for (Object o : dpWritables) { - if (o == null || o.toString().length() == 0) { - dpVals.add(dpCtx.getDefaultPartitionName()); - } else { - dpVals.add(o.toString()); - } - } - fpaths = getDynOutPaths(dpVals, lbDirName); - - } else { - if (lbDirName != null) { - fpaths = lookupListBucketingPaths(lbDirName); - } else { - fpaths = fsp; - } - } - - rowOutWriters = fpaths.getOutWriters(); - // check if all record writers implement statistics. 
if atleast one RW - // doesn't implement stats interface we will fallback to conventional way - // of gathering stats - isCollectRWStats = areAllTrue(statsFromRecordWriter); - if (conf.isGatherStats() && !isCollectRWStats) { - if (statsCollectRawDataSize) { - SerDeStats stats = serializer.getSerDeStats(); - if (stats != null) { - fpaths.getStat().addToStat(StatsSetupConst.RAW_DATA_SIZE, stats.getRawDataSize()); - } - } - fpaths.getStat().addToStat(StatsSetupConst.ROW_COUNT, 1); - } - - - if (row_count != null) { - row_count.set(row_count.get() + 1); - } - - if (!multiFileSpray) { - rowOutWriters[0].write(row); - } else { - int keyHashCode = 0; - key.setHashCode(keyHashCode); - int bucketNum = prtner.getBucket(key, null, totalFiles); - int idx = bucketMap.get(bucketNum); - rowOutWriters[idx].write(row); - } - } catch (IOException e) { - throw new HiveException(e); - } + Object[] row = getRowObject(vrg, i); + super.processOp(row, tag); } } @@ -187,7 +73,7 @@ public void processOp(Object data, int tag) throws HiveException { } for (int i = 0; i < vrg.projectionSize; i++) { ColumnVector vectorColumn = vrg.cols[vrg.projectedColumns[i]]; - singleRow[i] = vrg.valueWriters[i].writeValue(vectorColumn, batchIndex); + singleRow[i] = valueWriters[i].writeValue(vectorColumn, batchIndex); } return singleRow; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java index e70eb04..6274cb6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java @@ -653,6 +653,21 @@ public void close(boolean aborted) throws HiveException { /** * Sorted reduce group batch processing mode. Each input VectorizedRowBatch will have the * same key. On endGroup (or close), the intermediate values are flushed. + * + * We build the output rows one-at-a-time in the output vectorized row batch (outputBatch) + * in 2 steps: + * + * 1) Just after startGroup, we copy the group key to the next position in the output batch, + * but don't increment the size in the batch (yet). This is done with the copyGroupKey + * method of VectorGroupKeyHelper. The next position is outputBatch.size + * + * We know the same key is used for the whole batch (i.e. repeating) since that is how + * vectorized reduce-shuffle feeds the batches to us. + * + * 2) Later at endGroup after reduce-shuffle has fed us all the input batches for the group, + * we fill in the aggregation columns in outputBatch at outputBatch.size. Our method + * writeGroupRow does this and finally increments outputBatch.size. + * */ private class ProcessingModeGroupBatches extends ProcessingModeBase { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java index 51beb7c..3c4f6cf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupKeyHelper.java @@ -42,19 +42,38 @@ void init(VectorExpression[] keyExpressions) throws HiveException { finishAdding(); } + /* + * This helper method copies the group keys from one vectorized row batch to another, + * but does not increment the outputBatch.size (i.e. the next output position). + * + * It was designed for VectorGroupByOperator's sorted reduce group batch processing mode + * to copy the group keys at startGroup. 
+ */ public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outputBatch, DataOutputBuffer buffer) throws HiveException { - // Grab the key at index 0. We don't care about selected or repeating since all keys in the input batch are the same. for(int i = 0; i< longIndices.length; ++i) { int keyIndex = longIndices[i]; LongColumnVector inputColumnVector = (LongColumnVector) inputBatch.cols[keyIndex]; LongColumnVector outputColumnVector = (LongColumnVector) outputBatch.cols[keyIndex]; + + // This vectorized code pattern says: + // If the input batch has no nulls at all (noNulls is true) OR + // the input row is NOT NULL, copy the value. + // + // Otherwise, we have a NULL input value. The standard way to mark a NULL in the + // output batch is: turn off noNulls indicating there is at least one NULL in the batch + // and mark that row as NULL. + // + // When a vectorized row batch is reset, noNulls is set to true and the isNull array + // is zeroed. + // + // We grab the key at index 0. We don't care about selected or repeating since all keys + // in the input batch are supposed to be the same. + // if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) { outputColumnVector.vector[outputBatch.size] = inputColumnVector.vector[0]; - } else if (inputColumnVector.noNulls ){ - outputColumnVector.noNulls = false; - outputColumnVector.isNull[outputBatch.size] = true; } else { + outputColumnVector.noNulls = false; outputColumnVector.isNull[outputBatch.size] = true; } } @@ -64,10 +83,8 @@ public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outpu DoubleColumnVector outputColumnVector = (DoubleColumnVector) outputBatch.cols[keyIndex]; if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) { outputColumnVector.vector[outputBatch.size] = inputColumnVector.vector[0]; - } else if (inputColumnVector.noNulls ){ - outputColumnVector.noNulls = false; - outputColumnVector.isNull[outputBatch.size] = true; } else { + outputColumnVector.noNulls = false; outputColumnVector.isNull[outputBatch.size] = true; } } @@ -85,10 +102,8 @@ public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outpu throw new IllegalStateException("bad write", ioe); } outputColumnVector.setRef(outputBatch.size, buffer.getData(), start, length); - } else if (inputColumnVector.noNulls ){ - outputColumnVector.noNulls = false; - outputColumnVector.isNull[outputBatch.size] = true; } else { + outputColumnVector.noNulls = false; outputColumnVector.isNull[outputBatch.size] = true; } } @@ -98,10 +113,8 @@ public void copyGroupKey(VectorizedRowBatch inputBatch, VectorizedRowBatch outpu DecimalColumnVector outputColumnVector = (DecimalColumnVector) outputBatch.cols[keyIndex]; if (inputColumnVector.noNulls || !inputColumnVector.isNull[0]) { outputColumnVector.vector[outputBatch.size] = inputColumnVector.vector[0]; - } else if (inputColumnVector.noNulls ){ - outputColumnVector.noNulls = false; - outputColumnVector.isNull[outputBatch.size] = true; } else { + outputColumnVector.noNulls = false; outputColumnVector.isNull[outputBatch.size] = true; } }
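As a minimal sketch of the NULL convention described in the comments above, the copy-one-repeating-key step looks like this in isolation. The LongColumn type is a hypothetical stand-in for Hive's LongColumnVector, assumed only to carry the same batch-level noNulls flag and row-level isNull array.

// Stand-in for a long column using the batch-level noNulls / row-level isNull convention.
final class LongColumn {
  long[] vector;
  boolean[] isNull;
  boolean noNulls = true;   // a batch reset sets noNulls = true and zeroes isNull

  LongColumn(int size) {
    vector = new long[size];
    isNull = new boolean[size];
  }
}

final class GroupKeyCopySketch {
  // Copy the repeating key at input index 0 into output position outSize,
  // following the same branch structure as copyGroupKey above.
  static void copyKey(LongColumn in, LongColumn out, int outSize) {
    if (in.noNulls || !in.isNull[0]) {
      out.vector[outSize] = in.vector[0];
    } else {
      // Standard NULL marking: batch-level flag off, row-level flag on.
      out.noNulls = false;
      out.isNull[outSize] = true;
    }
  }
}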
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java index c7e044e..1c366df 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorHashKeyWrapper.java @@ -36,6 +36,12 @@ */ public class VectorHashKeyWrapper extends KeyWrapper { + private static final int[] EMPTY_INT_ARRAY = new int[0]; + private static final long[] EMPTY_LONG_ARRAY = new long[0]; + private static final double[] EMPTY_DOUBLE_ARRAY = new double[0]; + private static final byte[][] EMPTY_BYTES_ARRAY = new byte[0][]; + private static final Decimal128[] EMPTY_DECIMAL_ARRAY = new Decimal128[0]; + private long[] longValues; private double[] doubleValues; @@ -50,15 +56,21 @@ public VectorHashKeyWrapper(int longValuesCount, int doubleValuesCount, int byteValuesCount, int decimalValuesCount) { - longValues = new long[longValuesCount]; - doubleValues = new double[doubleValuesCount]; - decimalValues = new Decimal128[decimalValuesCount]; + longValues = longValuesCount > 0 ? new long[longValuesCount] : EMPTY_LONG_ARRAY; + doubleValues = doubleValuesCount > 0 ? new double[doubleValuesCount] : EMPTY_DOUBLE_ARRAY; + decimalValues = decimalValuesCount > 0 ? new Decimal128[decimalValuesCount] : EMPTY_DECIMAL_ARRAY; for(int i = 0; i < decimalValuesCount; ++i) { decimalValues[i] = new Decimal128(); } - byteValues = new byte[byteValuesCount][]; - byteStarts = new int[byteValuesCount]; - byteLengths = new int[byteValuesCount]; + if (byteValuesCount > 0) { + byteValues = new byte[byteValuesCount][]; + byteStarts = new int[byteValuesCount]; + byteLengths = new int[byteValuesCount]; + } else { + byteValues = EMPTY_BYTES_ARRAY; + byteStarts = EMPTY_INT_ARRAY; + byteLengths = EMPTY_INT_ARRAY; + } isNull = new boolean[longValuesCount + doubleValuesCount + byteValuesCount + decimalValuesCount]; hashcode = 0; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java index c096414..0ae0186 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorReduceSinkOperator.java @@ -18,99 +18,30 @@ package org.apache.hadoop.hive.ql.exec.vector; -import java.io.IOException; -import java.util.Random; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.ql.exec.PTFTopNHash; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; -import org.apache.hadoop.hive.ql.exec.TopNHash; -import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory; -import org.apache.hadoop.hive.ql.io.HiveKey; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; -import org.apache.hadoop.hive.ql.plan.TableDesc; -import org.apache.hadoop.hive.serde2.SerDeException; -import org.apache.hadoop.hive.serde2.Serializer; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; -import org.apache.hadoop.hive.serde2.objectinspector.StandardUnionObjectInspector.StandardUnion; -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Text; -// import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; public class VectorReduceSinkOperator extends ReduceSinkOperator { - private static final Log LOG = LogFactory.getLog(
- VectorReduceSinkOperator.class.getName()); - private static final long serialVersionUID = 1L; - /** - * The evaluators for the key columns. Key columns decide the sort order on - * the reducer side. Key columns are passed to the reducer in the "key". - */ - private VectorExpression[] keyEval; - - /** - * The key value writers. These know how to write the necessary writable type - * based on key column metadata, from the primitive vector type. - */ - private transient VectorExpressionWriter[] keyWriters; - - /** - * The evaluators for the value columns. Value columns are passed to reducer - * in the "value". - */ - private VectorExpression[] valueEval; - - /** - * The output value writers. These know how to write the necessary writable type - * based on value column metadata, from the primitive vector type. - */ - private transient VectorExpressionWriter[] valueWriters; - - /** - * The evaluators for the partition columns (CLUSTER BY or DISTRIBUTE BY in - * Hive language). Partition columns decide the reducer that the current row - * goes to. Partition columns are not passed to reducer. - */ - private VectorExpression[] partitionEval; - - /** - * Evaluators for bucketing columns. This is used to compute bucket number. - */ - private VectorExpression[] bucketEval; - private int buckColIdxInKey; - - /** - * The partition value writers. These know how to write the necessary writable type - * based on partition column metadata, from the primitive vector type. - */ - private transient VectorExpressionWriter[] partitionWriters; - private transient VectorExpressionWriter[] bucketWriters = null; - - private static final boolean isDebugEnabled = LOG.isDebugEnabled(); + // Writer for producing row from input batch. + private VectorExpressionWriter[] rowWriters; + + protected transient Object[] singleRow; public VectorReduceSinkOperator(VectorizationContext vContext, OperatorDesc conf) throws HiveException { this(); ReduceSinkDesc desc = (ReduceSinkDesc) conf; this.conf = desc; - keyEval = vContext.getVectorExpressions(desc.getKeyCols()); - valueEval = vContext.getVectorExpressions(desc.getValueCols()); - partitionEval = vContext.getVectorExpressions(desc.getPartitionCols()); - bucketEval = null; - if (desc.getBucketCols() != null && !desc.getBucketCols().isEmpty()) { - bucketEval = vContext.getVectorExpressions(desc.getBucketCols()); - buckColIdxInKey = desc.getPartitionCols().size(); - } } public VectorReduceSinkOperator() { @@ -119,399 +50,49 @@ public VectorReduceSinkOperator() { @Override protected void initializeOp(Configuration hconf) throws HiveException { - try { - numDistributionKeys = conf.getNumDistributionKeys(); - distinctColIndices = conf.getDistinctColumnIndices(); - numDistinctExprs = distinctColIndices.size(); - - TableDesc keyTableDesc = conf.getKeySerializeInfo(); - keySerializer = (Serializer) keyTableDesc.getDeserializerClass() - .newInstance(); - keySerializer.initialize(null, keyTableDesc.getProperties()); - keyIsText = keySerializer.getSerializedClass().equals(Text.class); - - /* - * Compute and assign the key writers and the key object inspector - */ - VectorExpressionWriterFactory.processVectorExpressions( - conf.getKeyCols(), - conf.getOutputKeyColumnNames(), - new VectorExpressionWriterFactory.SingleOIDClosure() { - @Override - public void assign(VectorExpressionWriter[] writers, - ObjectInspector objectInspector) { - keyWriters = writers; - keyObjectInspector = objectInspector; - } - }); - - String colNames = ""; - for(String colName : conf.getOutputKeyColumnNames()) { 
- colNames = String.format("%s %s", colNames, colName); - } - - if (isDebugEnabled) { - LOG.debug(String.format("keyObjectInspector [%s]%s => %s", - keyObjectInspector.getClass(), - keyObjectInspector, - colNames)); - } - - partitionWriters = VectorExpressionWriterFactory.getExpressionWriters(conf.getPartitionCols()); - if (conf.getBucketCols() != null && !conf.getBucketCols().isEmpty()) { - bucketWriters = VectorExpressionWriterFactory.getExpressionWriters(conf.getBucketCols()); - } - - TableDesc valueTableDesc = conf.getValueSerializeInfo(); - valueSerializer = (Serializer) valueTableDesc.getDeserializerClass() - .newInstance(); - valueSerializer.initialize(null, valueTableDesc.getProperties()); - - /* - * Compute and assign the value writers and the value object inspector - */ - VectorExpressionWriterFactory.processVectorExpressions( - conf.getValueCols(), - conf.getOutputValueColumnNames(), - new VectorExpressionWriterFactory.SingleOIDClosure() { - @Override - public void assign(VectorExpressionWriter[] writers, - ObjectInspector objectInspector) { - valueWriters = writers; - valueObjectInspector = objectInspector; + // We need an input object inspector that is for the row we will extract out of the + // vectorized row batch, not for example, an original inspector for an ORC table, etc. + VectorExpressionWriterFactory.processVectorInspector( + (StructObjectInspector) inputObjInspectors[0], + new VectorExpressionWriterFactory.SingleOIDClosure() { + @Override + public void assign(VectorExpressionWriter[] writers, + ObjectInspector objectInspector) { + rowWriters = writers; + inputObjInspectors[0] = objectInspector; } - }); - - if (isDebugEnabled) { - colNames = ""; - for(String colName : conf.getOutputValueColumnNames()) { - colNames = String.format("%s %s", colNames, colName); - } - } + }); + singleRow = new Object[rowWriters.length]; - if (isDebugEnabled) { - LOG.debug(String.format("valueObjectInspector [%s]%s => %s", - valueObjectInspector.getClass(), - valueObjectInspector, - colNames)); - } - - int numKeys = numDistinctExprs > 0 ? numDistinctExprs : 1; - int keyLen = numDistinctExprs > 0 ? numDistributionKeys + 1 : - numDistributionKeys; - cachedKeys = new Object[numKeys][keyLen]; - cachedValues = new Object[valueEval.length]; - - int tag = conf.getTag(); - tagByte[0] = (byte) tag; - LOG.info("Using tag = " + tag); - - int limit = conf.getTopN(); - float memUsage = conf.getTopNMemoryUsage(); - if (limit >= 0 && memUsage > 0) { - reducerHash = conf.isPTFReduceSink() ? new PTFTopNHash() : reducerHash; - reducerHash.initialize(limit, memUsage, conf.isMapGroupBy(), this); - } - - autoParallel = conf.isAutoParallel(); - - } catch(Exception e) { - throw new HiveException(e); - } + // Call ReduceSinkOperator with new input inspector. + super.initializeOp(hconf); } @Override - public void processOp(Object row, int tag) throws HiveException { - VectorizedRowBatch vrg = (VectorizedRowBatch) row; - - if (isDebugEnabled) { - LOG.debug(String.format("sinking %d rows, %d values, %d keys, %d parts", - vrg.size, - valueEval.length, - keyEval.length, - partitionEval.length)); - } - - try { - // Evaluate the keys - for (int i = 0; i < keyEval.length; i++) { - keyEval[i].evaluate(vrg); - } - - // Determine which rows we need to emit based on topN optimization - int startResult = reducerHash.startVectorizedBatch(vrg.size); - if (startResult == TopNHash.EXCLUDE) { - return; // TopN wants us to exclude all rows. - } - // TODO: can we do this later/only for the keys that are needed? E.g.
update vrg.selected. - for (int i = 0; i < partitionEval.length; i++) { - partitionEval[i].evaluate(vrg); - } - if (bucketEval != null) { - for (int i = 0; i < bucketEval.length; i++) { - bucketEval[i].evaluate(vrg); - } - } - // run the vector evaluations - for (int i = 0; i < valueEval.length; i++) { - valueEval[i].evaluate(vrg); - } - - boolean useTopN = startResult != TopNHash.FORWARD; - // Go thru the batch once. If we are not using TopN, we will forward all things and be done. - // If we are using topN, we will make the first key for each row and store/forward it. - // Values, hashes and additional distinct rows will be handled in the 2nd pass in that case. - for (int batchIndex = 0 ; batchIndex < vrg.size; ++batchIndex) { - int rowIndex = batchIndex; - if (vrg.selectedInUse) { - rowIndex = vrg.selected[batchIndex]; - } - // First, make distrib key components for this row and determine distKeyLength. - populatedCachedDistributionKeys(vrg, rowIndex, 0); - - // replace bucketing columns with hashcode % numBuckets - int buckNum = -1; - if (bucketEval != null) { - buckNum = computeBucketNumber(vrg, rowIndex, conf.getNumBuckets()); - cachedKeys[0][buckColIdxInKey] = new IntWritable(buckNum); - } - HiveKey firstKey = toHiveKey(cachedKeys[0], tag, null); - int distKeyLength = firstKey.getDistKeyLength(); - // Add first distinct expression, if any. - if (numDistinctExprs > 0) { - populateCachedDistinctKeys(vrg, rowIndex, 0); - firstKey = toHiveKey(cachedKeys[0], tag, distKeyLength); - } - - final int hashCode; - - // distKeyLength doesn't include tag, but includes buckNum in cachedKeys[0] - if (autoParallel && partitionEval.length > 0) { - hashCode = computeMurmurHash(firstKey); - } else { - hashCode = computeHashCode(vrg, rowIndex, buckNum); - } - - firstKey.setHashCode(hashCode); + public void processOp(Object data, int tag) throws HiveException { + VectorizedRowBatch vrg = (VectorizedRowBatch) data; - if (useTopN) { - /* - * in case of TopN for windowing, we need to distinguish between - * rows with null partition keys and rows with value 0 for partition keys. - */ - boolean partkeysNull = conf.isPTFReduceSink() && partitionKeysAreNull(vrg, rowIndex); - reducerHash.tryStoreVectorizedKey(firstKey, partkeysNull, batchIndex); - } else { - // No TopN, just forward the first key and all others. - BytesWritable value = makeValueWritable(vrg, rowIndex); - collect(firstKey, value); - forwardExtraDistinctRows(vrg, rowIndex, hashCode, value, distKeyLength, tag, 0); - } - } - - if (!useTopN) return; // All done. - - // If we use topN, we have called tryStore on every key now. We can process the results. - for (int batchIndex = 0 ; batchIndex < vrg.size; ++batchIndex) { - int result = reducerHash.getVectorizedBatchResult(batchIndex); - if (result == TopNHash.EXCLUDE) continue; - int rowIndex = batchIndex; - if (vrg.selectedInUse) { - rowIndex = vrg.selected[batchIndex]; - } - // Compute value and hashcode - we'd either store or forward them. 
- BytesWritable value = makeValueWritable(vrg, rowIndex); - int distKeyLength = -1; - int hashCode; - if (result == TopNHash.FORWARD) { - HiveKey firstKey = reducerHash.getVectorizedKeyToForward(batchIndex); - distKeyLength = firstKey.getDistKeyLength(); - hashCode = firstKey.hashCode(); - collect(firstKey, value); - } else { - hashCode = reducerHash.getVectorizedKeyHashCode(batchIndex); - reducerHash.storeValue(result, hashCode, value, true); - distKeyLength = reducerHash.getVectorizedKeyDistLength(batchIndex); - } - // Now forward other the rows if there's multi-distinct (but see TODO in forward...). - // Unfortunately, that means we will have to rebuild the cachedKeys. Start at 1. - if (numDistinctExprs > 1) { - populatedCachedDistributionKeys(vrg, rowIndex, 1); - forwardExtraDistinctRows(vrg, rowIndex, hashCode, value, distKeyLength, tag, 1); - } - } - } catch (SerDeException e) { - throw new HiveException(e); - } catch (IOException e) { - throw new HiveException(e); - } - } - - /** - * This function creates and forwards all the additional KVs for the multi-distinct case, - * after the first (0th) KV pertaining to the row has already been stored or forwarded. - * @param vrg the batch - * @param rowIndex the row index in the batch - * @param hashCode the partitioning hash code to use; same as for the first KV - * @param value the value to use; same as for the first KV - * @param distKeyLength the distribution key length of the first key; TODO probably extraneous - * @param tag the tag - * @param baseIndex the index in cachedKeys where the pre-evaluated distribution keys are stored - */ - private void forwardExtraDistinctRows(VectorizedRowBatch vrg, int rowIndex,int hashCode, - BytesWritable value, int distKeyLength, int tag, int baseIndex) - throws HiveException, SerDeException, IOException { - // TODO: We don't have to forward extra distinct rows immediately (same in non-vector) if - // the first key has already been stored. There's few bytes difference between keys - // for different distincts, and the value/etc. are all the same. - // We could store deltas to re-gen extra rows when flushing TopN. - for (int i = 1; i < numDistinctExprs; i++) { - if (i != baseIndex) { - System.arraycopy(cachedKeys[baseIndex], 0, cachedKeys[i], 0, numDistributionKeys); - } - populateCachedDistinctKeys(vrg, rowIndex, i); - HiveKey hiveKey = toHiveKey(cachedKeys[i], tag, distKeyLength); - hiveKey.setHashCode(hashCode); - collect(hiveKey, value); - } - } - - /** - * Populate distribution keys part of cachedKeys for a particular row from the batch. - * @param vrg the batch - * @param rowIndex the row index in the batch - * @param index the cachedKeys index to write to - */ - private void populatedCachedDistributionKeys( - VectorizedRowBatch vrg, int rowIndex, int index) throws HiveException { - for (int i = 0; i < numDistributionKeys; i++) { - int batchColumn = keyEval[i].getOutputColumn(); - ColumnVector vectorColumn = vrg.cols[batchColumn]; - cachedKeys[index][i] = keyWriters[i].writeValue(vectorColumn, rowIndex); - } - if (cachedKeys[index].length > numDistributionKeys) { - cachedKeys[index][numDistributionKeys] = null; - } - } - - /** - * Populate distinct keys part of cachedKeys for a particular row from the batch. 
- * @param vrg the batch - * @param rowIndex the row index in the batch - * @param index the cachedKeys index to write to - */ - private void populateCachedDistinctKeys( - VectorizedRowBatch vrg, int rowIndex, int index) throws HiveException { - StandardUnion union; - cachedKeys[index][numDistributionKeys] = union = new StandardUnion( - (byte)index, new Object[distinctColIndices.get(index).size()]); - Object[] distinctParameters = (Object[]) union.getObject(); - for (int distinctParamI = 0; distinctParamI < distinctParameters.length; distinctParamI++) { - int distinctColIndex = distinctColIndices.get(index).get(distinctParamI); - int batchColumn = keyEval[distinctColIndex].getOutputColumn(); - distinctParameters[distinctParamI] = - keyWriters[distinctColIndex].writeValue(vrg.cols[batchColumn], rowIndex); - } - union.setTag((byte) index); - } - - private BytesWritable makeValueWritable(VectorizedRowBatch vrg, int rowIndex) - throws HiveException, SerDeException { - for (int i = 0; i < valueEval.length; i++) { - int batchColumn = valueEval[i].getOutputColumn(); - ColumnVector vectorColumn = vrg.cols[batchColumn]; - cachedValues[i] = valueWriters[i].writeValue(vectorColumn, rowIndex); - } - // Serialize the value - return (BytesWritable)valueSerializer.serialize(cachedValues, valueObjectInspector); - } - - private int computeHashCode(VectorizedRowBatch vrg, int rowIndex, int buckNum) throws HiveException { - // Evaluate the HashCode - int keyHashCode = 0; - if (partitionEval.length == 0) { - // If no partition cols, just distribute the data uniformly to provide better - // load balance. If the requirement is to have a single reducer, we should set - // the number of reducers to 1. - // Use a constant seed to make the code deterministic. - if (random == null) { - random = new Random(12345); - } - keyHashCode = random.nextInt(); - } else { - for (int p = 0; p < partitionEval.length; p++) { - ColumnVector columnVector = vrg.cols[partitionEval[p].getOutputColumn()]; - Object partitionValue = partitionWriters[p].writeValue(columnVector, rowIndex); - keyHashCode = keyHashCode - * 31 - + ObjectInspectorUtils.hashCode( - partitionValue, - partitionWriters[p].getObjectInspector()); - } + for (int batchIndex = 0 ; batchIndex < vrg.size; ++batchIndex) { + Object row = getRowObject(vrg, batchIndex); + super.processOp(row, tag); } - return buckNum < 0 ? 
keyHashCode : keyHashCode * 31 + buckNum; } - private boolean partitionKeysAreNull(VectorizedRowBatch vrg, int rowIndex) + private Object[] getRowObject(VectorizedRowBatch vrg, int rowIndex) throws HiveException { - if (partitionEval.length != 0) { - for (int p = 0; p < partitionEval.length; p++) { - ColumnVector columnVector = vrg.cols[partitionEval[p].getOutputColumn()]; - Object partitionValue = partitionWriters[p].writeValue(columnVector, - rowIndex); - if (partitionValue != null) { - return false; - } - } - return true; - } - return false; - } - - private int computeBucketNumber(VectorizedRowBatch vrg, int rowIndex, int numBuckets) throws HiveException { - int bucketNum = 0; - for (int p = 0; p < bucketEval.length; p++) { - ColumnVector columnVector = vrg.cols[bucketEval[p].getOutputColumn()]; - Object bucketValue = bucketWriters[p].writeValue(columnVector, rowIndex); - bucketNum = bucketNum - * 31 - + ObjectInspectorUtils.hashCode( - bucketValue, - bucketWriters[p].getObjectInspector()); + int batchIndex = rowIndex; + if (vrg.selectedInUse) { + batchIndex = vrg.selected[rowIndex]; } - - if (bucketNum < 0) { - bucketNum = -1 * bucketNum; + for (int i = 0; i < vrg.projectionSize; i++) { + ColumnVector vectorColumn = vrg.cols[vrg.projectedColumns[i]]; + if (vectorColumn != null) { + singleRow[i] = rowWriters[i].writeValue(vectorColumn, batchIndex); + } else { + // Some columns from tables are not used. + singleRow[i] = null; + } } - - return bucketNum % numBuckets; - } - - static public String getOperatorName() { - return "RS"; - } - - public VectorExpression[] getPartitionEval() { - return partitionEval; - } - - public void setPartitionEval(VectorExpression[] partitionEval) { - this.partitionEval = partitionEval; - } - - public VectorExpression[] getValueEval() { - return valueEval; - } - - public void setValueEval(VectorExpression[] valueEval) { - this.valueEval = valueEval; - } - - public VectorExpression[] getKeyEval() { - return keyEval; - } - - public void setKeyEval(VectorExpression[] keyEval) { - this.keyEval = keyEval; + return singleRow; } }
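The getRowObject pattern above (honor the selected[] indirection when a filter has compacted the batch, then walk the projected columns) recurs in several of these vectorized operators. Here is a minimal sketch of just that addressing logic; MiniBatch is a hypothetical stand-in for the addressing fields of VectorizedRowBatch, with Object[] columns in place of real ColumnVectors.

// Stand-in for the addressing fields of VectorizedRowBatch.
final class MiniBatch {
  Object[][] cols;          // cols[column][row] stands in for ColumnVector contents
  int[] projectedColumns;   // logical column i lives at cols[projectedColumns[i]]
  int projectionSize;
  int[] selected;           // physical indices of live rows when selectedInUse is true
  boolean selectedInUse;
  int size;
}

final class RowExtractSketch {
  // Materialize logical row rowIndex as an Object[], mirroring getRowObject above.
  static Object[] getRow(MiniBatch b, int rowIndex) {
    // A filter may have left only some rows live; selected[] maps to their physical slots.
    int batchIndex = b.selectedInUse ? b.selected[rowIndex] : rowIndex;
    Object[] row = new Object[b.projectionSize];
    for (int i = 0; i < b.projectionSize; i++) {
      Object[] col = b.cols[b.projectedColumns[i]];
      // Unused table columns may have no backing vector; surface them as SQL NULL.
      row[i] = (col == null) ? null : col[batchIndex];
    }
    return row;
  }
}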
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java index de33830..34f5823 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizationContext.java @@ -1889,47 +1889,47 @@ static String getUndecoratedName(String hiveTypeName) { // TODO: And, investigate if different reduce-side versions are needed for var* and std*, or if map-side aggregate can be used.. Right now they are conservatively // marked map-side (HASH). static ArrayList aggregatesDefinition = new ArrayList() {{ - add(new AggregateDefinition("min", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, null, VectorUDAFMinLong.class)); - add(new AggregateDefinition("min", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, null, VectorUDAFMinDouble.class)); - add(new AggregateDefinition("min", VectorExpressionDescriptor.ArgumentType.STRING_FAMILY, null, VectorUDAFMinString.class)); - add(new AggregateDefinition("min", VectorExpressionDescriptor.ArgumentType.DECIMAL, null, VectorUDAFMinDecimal.class)); - add(new AggregateDefinition("max", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, null, VectorUDAFMaxLong.class)); - add(new AggregateDefinition("max", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, null, VectorUDAFMaxDouble.class)); - add(new AggregateDefinition("max", VectorExpressionDescriptor.ArgumentType.STRING_FAMILY, null, VectorUDAFMaxString.class)); - add(new AggregateDefinition("max", VectorExpressionDescriptor.ArgumentType.DECIMAL, null, VectorUDAFMaxDecimal.class)); - add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.NONE, GroupByDesc.Mode.HASH, VectorUDAFCountStar.class)); - add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFCount.class)); - add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.MERGEPARTIAL, VectorUDAFCountMerge.class)); - add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFCount.class)); - add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.STRING_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFCount.class)); - add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFCount.class)); - add(new AggregateDefinition("sum", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, null, VectorUDAFSumLong.class)); - add(new AggregateDefinition("sum", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, null, VectorUDAFSumDouble.class)); - add(new AggregateDefinition("sum", VectorExpressionDescriptor.ArgumentType.DECIMAL, null, VectorUDAFSumDecimal.class)); - add(new AggregateDefinition("avg", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFAvgLong.class)); - add(new AggregateDefinition("avg", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFAvgDouble.class)); - add(new AggregateDefinition("avg", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFAvgDecimal.class)); - add(new AggregateDefinition("variance", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarPopLong.class)); - add(new AggregateDefinition("var_pop", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarPopLong.class)); - add(new AggregateDefinition("variance", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarPopDouble.class)); - add(new AggregateDefinition("var_pop", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarPopDouble.class)); - add(new AggregateDefinition("variance", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFVarPopDecimal.class)); - add(new AggregateDefinition("var_pop", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH,
VectorUDAFVarPopDecimal.class)); - add(new AggregateDefinition("var_samp", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarSampLong.class)); - add(new AggregateDefinition("var_samp" , VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarSampDouble.class)); - add(new AggregateDefinition("var_samp" , VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFVarSampDecimal.class)); - add(new AggregateDefinition("std", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopLong.class)); - add(new AggregateDefinition("stddev", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopLong.class)); - add(new AggregateDefinition("stddev_pop", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopLong.class)); - add(new AggregateDefinition("std", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopDouble.class)); - add(new AggregateDefinition("stddev", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopDouble.class)); - add(new AggregateDefinition("stddev_pop", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopDouble.class)); - add(new AggregateDefinition("std", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFStdPopDecimal.class)); - add(new AggregateDefinition("stddev", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFStdPopDecimal.class)); - add(new AggregateDefinition("stddev_pop", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFStdPopDecimal.class)); - add(new AggregateDefinition("stddev_samp", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdSampLong.class)); - add(new AggregateDefinition("stddev_samp", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdSampDouble.class)); - add(new AggregateDefinition("stddev_samp", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFStdSampDecimal.class)); + add(new AggregateDefinition("min", VectorExpressionDescriptor.ArgumentType.INT_DATETIME_FAMILY, null, VectorUDAFMinLong.class)); + add(new AggregateDefinition("min", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, null, VectorUDAFMinDouble.class)); + add(new AggregateDefinition("min", VectorExpressionDescriptor.ArgumentType.STRING_FAMILY, null, VectorUDAFMinString.class)); + add(new AggregateDefinition("min", VectorExpressionDescriptor.ArgumentType.DECIMAL, null, VectorUDAFMinDecimal.class)); + add(new AggregateDefinition("max", VectorExpressionDescriptor.ArgumentType.INT_DATETIME_FAMILY, null, VectorUDAFMaxLong.class)); + add(new AggregateDefinition("max", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, null, VectorUDAFMaxDouble.class)); + add(new AggregateDefinition("max", VectorExpressionDescriptor.ArgumentType.STRING_FAMILY, null, VectorUDAFMaxString.class)); + add(new AggregateDefinition("max", VectorExpressionDescriptor.ArgumentType.DECIMAL, null, VectorUDAFMaxDecimal.class)); + add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.NONE, GroupByDesc.Mode.HASH, VectorUDAFCountStar.class)); + add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.INT_DATETIME_FAMILY, GroupByDesc.Mode.HASH, 
VectorUDAFCount.class)); + add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, GroupByDesc.Mode.MERGEPARTIAL, VectorUDAFCountMerge.class)); + add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFCount.class)); + add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.STRING_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFCount.class)); + add(new AggregateDefinition("count", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFCount.class)); + add(new AggregateDefinition("sum", VectorExpressionDescriptor.ArgumentType.INT_FAMILY, null, VectorUDAFSumLong.class)); + add(new AggregateDefinition("sum", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, null, VectorUDAFSumDouble.class)); + add(new AggregateDefinition("sum", VectorExpressionDescriptor.ArgumentType.DECIMAL, null, VectorUDAFSumDecimal.class)); + add(new AggregateDefinition("avg", VectorExpressionDescriptor.ArgumentType.INT_TIMESTAMP_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFAvgLong.class)); + add(new AggregateDefinition("avg", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFAvgDouble.class)); + add(new AggregateDefinition("avg", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFAvgDecimal.class)); + add(new AggregateDefinition("variance", VectorExpressionDescriptor.ArgumentType.INT_TIMESTAMP_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarPopLong.class)); + add(new AggregateDefinition("var_pop", VectorExpressionDescriptor.ArgumentType.INT_TIMESTAMP_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarPopLong.class)); + add(new AggregateDefinition("variance", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarPopDouble.class)); + add(new AggregateDefinition("var_pop", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarPopDouble.class)); + add(new AggregateDefinition("variance", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFVarPopDecimal.class)); + add(new AggregateDefinition("var_pop", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFVarPopDecimal.class)); + add(new AggregateDefinition("var_samp", VectorExpressionDescriptor.ArgumentType.INT_TIMESTAMP_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarSampLong.class)); + add(new AggregateDefinition("var_samp" , VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFVarSampDouble.class)); + add(new AggregateDefinition("var_samp" , VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFVarSampDecimal.class)); + add(new AggregateDefinition("std", VectorExpressionDescriptor.ArgumentType.INT_TIMESTAMP_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopLong.class)); + add(new AggregateDefinition("stddev", VectorExpressionDescriptor.ArgumentType.INT_TIMESTAMP_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopLong.class)); + add(new AggregateDefinition("stddev_pop", VectorExpressionDescriptor.ArgumentType.INT_TIMESTAMP_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopLong.class)); + add(new AggregateDefinition("std", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopDouble.class)); + add(new AggregateDefinition("stddev", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopDouble.class)); + add(new 
AggregateDefinition("stddev_pop", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdPopDouble.class)); + add(new AggregateDefinition("std", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFStdPopDecimal.class)); + add(new AggregateDefinition("stddev", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFStdPopDecimal.class)); + add(new AggregateDefinition("stddev_pop", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFStdPopDecimal.class)); + add(new AggregateDefinition("stddev_samp", VectorExpressionDescriptor.ArgumentType.INT_TIMESTAMP_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdSampLong.class)); + add(new AggregateDefinition("stddev_samp", VectorExpressionDescriptor.ArgumentType.FLOAT_FAMILY, GroupByDesc.Mode.HASH, VectorUDAFStdSampDouble.class)); + add(new AggregateDefinition("stddev_samp", VectorExpressionDescriptor.ArgumentType.DECIMAL, GroupByDesc.Mode.HASH, VectorUDAFStdSampDecimal.class)); }}; public VectorAggregateExpression getAggregatorExpression(AggregationDesc desc, boolean isReduce) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java index c77d002..21c757e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorizedRowBatchCtx.java @@ -140,6 +140,20 @@ public void init(Configuration hiveConf, String fileKey, /** + * Initializes the VectorizedRowBatch context based on an scratch column type map and + * object inspector. + * @param columnTypeMap + * @param rowOI + * Object inspector that shapes the column types + */ + public void init(Map columnTypeMap, + StructObjectInspector rowOI) { + this.columnTypeMap = columnTypeMap; + this.rowOI= rowOI; + this.rawRowOI = rowOI; + } + + /** * Initializes VectorizedRowBatch context based on the * split and Hive configuration (Job conf with hive Plan). * diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java index c037ea8..85f3e24 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorExpressionWriterFactory.java @@ -1060,6 +1060,31 @@ public static void processVectorExpressions( closure.assign(writers, oids); } + /** + * Creates the value writers for an struct object inspector. + * Creates an appropriate output object inspector. + */ + public static void processVectorInspector( + StructObjectInspector structObjInspector, + SingleOIDClosure closure) + throws HiveException { + List fields = structObjInspector.getAllStructFieldRefs(); + VectorExpressionWriter[] writers = new VectorExpressionWriter[fields.size()]; + List oids = new ArrayList(writers.length); + ArrayList columnNames = new ArrayList(); + int i = 0; + for(StructField field : fields) { + ObjectInspector fieldObjInsp = field.getFieldObjectInspector(); + writers[i] = VectorExpressionWriterFactory. + genVectorExpressionWritable(fieldObjInsp); + columnNames.add(field.getFieldName()); + oids.add(writers[i].getObjectInspector()); + i++; + } + ObjectInspector objectInspector = ObjectInspectorFactory. 
+ getStandardStructObjectInspector(columnNames,oids); + closure.assign(writers, objectInspector); + } /** * Returns {@link VectorExpressionWriter} objects for the fields in the given diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java index 49c095a..260444f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java @@ -147,7 +147,7 @@ public String getIpAddress() { } public String getOperationName() { - return SessionState.get().getHiveOperation().name(); + return queryPlan.getOperationName(); } public String getUserName() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 65a795c..928fd61 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -40,6 +40,8 @@ * are used by the compactor and cleaner and thus must be format agnostic. */ public class AcidUtils { + // This key will be put in the conf file when planning an acid operation + public static final String CONF_ACID_KEY = "hive.doing.acid"; public static final String BASE_PREFIX = "base_"; public static final String DELTA_PREFIX = "delta_"; public static final PathFilter deltaFileFilter = new PathFilter() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java index c5f6c1e..58e1ceb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java @@ -161,10 +161,11 @@ protected void updateIOContext() } public IOContext getIOContext() { - return IOContext.get(); + return IOContext.get(jobConf); } - public void initIOContext(long startPos, boolean isBlockPointer, Path inputPath) { + private void initIOContext(long startPos, boolean isBlockPointer, + Path inputPath) { ioCxtRef = this.getIOContext(); ioCxtRef.currentBlockStart = startPos; ioCxtRef.isBlockPointer = isBlockPointer; @@ -183,7 +184,7 @@ public void initIOContext(FileSplit split, JobConf job, boolean blockPointer = false; long blockStart = -1; - FileSplit fileSplit = (FileSplit) split; + FileSplit fileSplit = split; Path path = fileSplit.getPath(); FileSystem fs = path.getFileSystem(job); if (inputFormatClass.getName().contains("SequenceFile")) { @@ -202,12 +203,15 @@ public void initIOContext(FileSplit split, JobConf job, blockStart = in.getPosition(); in.close(); } + this.jobConf = job; this.initIOContext(blockStart, blockPointer, path.makeQualified(fs)); this.initIOContextSortedProps(split, recordReader, job); } public void initIOContextSortedProps(FileSplit split, RecordReader recordReader, JobConf job) { + this.jobConf = job; + this.getIOContext().resetSortingValues(); this.isSorted = jobConf.getBoolean("hive.input.format.sorted", false); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index 8f4aeda..13c9751 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.MapWork; +import 
org.apache.hadoop.hive.ql.plan.MergeJoinWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; @@ -253,7 +254,14 @@ public RecordReader getRecordReader(InputSplit split, JobConf job, } protected void init(JobConf job) { - mrwork = Utilities.getMapWork(job); + if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { + mrwork = (MapWork) Utilities.getMergeWork(job); + if (mrwork == null) { + mrwork = Utilities.getMapWork(job); + } + } else { + mrwork = Utilities.getMapWork(job); + } pathToPartitionInfo = mrwork.getPathToPartitionInfo(); } @@ -420,6 +428,9 @@ protected static PartitionDesc getPartitionDescFromPath( public static void pushFilters(JobConf jobConf, TableScanOperator tableScan) { + // ensure filters are not set from previous pushFilters + jobConf.unset(TableScanDesc.FILTER_TEXT_CONF_STR); + jobConf.unset(TableScanDesc.FILTER_EXPR_CONF_STR); TableScanDesc scanDesc = tableScan.getConf(); if (scanDesc == null) { return; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java index 081b6bd..5fb3b13 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java @@ -18,7 +18,17 @@ package org.apache.hadoop.hive.ql.io; +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin; +import org.apache.hadoop.hive.ql.session.SessionState; /** @@ -31,16 +41,35 @@ */ public class IOContext { - private static ThreadLocal threadLocal = new ThreadLocal(){ @Override protected synchronized IOContext initialValue() { return new IOContext(); } }; + private static Map inputNameIOContextMap = new HashMap(); + private static IOContext ioContext = new IOContext(); + + public static Map getMap() { + return inputNameIOContextMap; + } + public static IOContext get() { return IOContext.threadLocal.get(); } + public static IOContext get(Configuration conf) { + if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) { + return get(); + } + String inputName = conf.get(Utilities.INPUT_NAME); + if (inputNameIOContextMap.containsKey(inputName) == false) { + IOContext ioContext = new IOContext(); + inputNameIOContextMap.put(inputName, ioContext); + } + + return inputNameIOContextMap.get(inputName); + } + public static void clear() { IOContext.threadLocal.remove(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index 0310fdf..9007771 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -132,7 +132,7 @@ @Override public boolean shouldSkipCombine(Path path, Configuration conf) throws IOException { - return AcidUtils.isAcid(path, conf); + return (conf.get(AcidUtils.CONF_ACID_KEY) != null) || AcidUtils.isAcid(path, conf); } private static class OrcRecordReader diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java index 
ab3164e..b6ad0dc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewInputFormat.java @@ -118,13 +118,11 @@ public boolean nextKeyValue() throws IOException, InterruptedException { public List getSplits(JobContext jobContext) throws IOException, InterruptedException { perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ORC_GET_SPLITS); - Configuration conf = - ShimLoader.getHadoopShims().getConfiguration(jobContext); List splits = OrcInputFormat.generateSplitsInfo(ShimLoader.getHadoopShims() .getConfiguration(jobContext)); - List result = new ArrayList(); - for(OrcSplit split: OrcInputFormat.generateSplitsInfo(conf)) { + List result = new ArrayList(splits.size()); + for(OrcSplit split: splits) { result.add(new OrcNewSplit(split)); } perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ORC_GET_SPLITS); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java index 5bd4599..6344a66 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java @@ -418,138 +418,120 @@ private void writeShortRepeatValues() throws IOException { private void determineEncoding() { - int idx = 0; + // we need to compute zigzag values for DIRECT encoding if we decide to + // break early for delta overflows or for shorter runs + computeZigZagLiterals(); + + zzBits100p = utils.percentileBits(zigzagLiterals, 0, numLiterals, 1.0); + + // not a big win for shorter runs to determine encoding + if (numLiterals <= MIN_REPEAT) { + encoding = EncodingType.DIRECT; + return; + } + + // DELTA encoding check // for identifying monotonic sequences - boolean isIncreasing = false; - int increasingCount = 1; - boolean isDecreasing = false; - int decreasingCount = 1; + boolean isIncreasing = true; + boolean isDecreasing = true; + this.isFixedDelta = true; - // for identifying type of delta encoding - min = literals[0]; + this.min = literals[0]; long max = literals[0]; - isFixedDelta = true; - long currDelta = 0; - - min = literals[0]; - long deltaMax = 0; - - // populate all variables to identify the encoding type - if (numLiterals >= 1) { - currDelta = literals[1] - literals[0]; - for(int i = 0; i < numLiterals; i++) { - if (i > 0 && literals[i] >= max) { - max = literals[i]; - increasingCount++; - } - - if (i > 0 && literals[i] <= min) { - min = literals[i]; - decreasingCount++; - } + final long initialDelta = literals[1] - literals[0]; + long currDelta = initialDelta; + long deltaMax = initialDelta; + this.adjDeltas[0] = initialDelta; + + for (int i = 1; i < numLiterals; i++) { + final long l1 = literals[i]; + final long l0 = literals[i - 1]; + currDelta = l1 - l0; + min = Math.min(min, l1); + max = Math.max(max, l1); + + isIncreasing &= (l0 <= l1); + isDecreasing &= (l0 >= l1); + + isFixedDelta &= (currDelta == initialDelta); + if (i > 1) { + adjDeltas[i - 1] = Math.abs(currDelta); + deltaMax = Math.max(deltaMax, adjDeltas[i - 1]); + } + } - // if delta doesn't changes then mark it as fixed delta - if (i > 0 && isFixedDelta) { - if (literals[i] - literals[i - 1] != currDelta) { - isFixedDelta = false; - } + // it's faster to exit under delta overflow condition without checking for + // PATCHED_BASE condition as encoding using DIRECT is faster and has less + // overhead than PATCHED_BASE + if (!utils.isSafeSubtract(max, min)) { + encoding =
EncodingType.DIRECT; + return; + } - fixedDelta = currDelta; - } + // invariant - subtracting any number from any other in the literals after + // this point won't overflow + + // if initialDelta is 0 then we cannot delta encode as we cannot identify + // the sign of deltas (increasing or decreasing) + if (initialDelta != 0) { + + // if min is equal to max then the delta is 0, this condition happens for + // fixed values run >10 which cannot be encoded with SHORT_REPEAT + if (min == max) { + assert isFixedDelta : min + "==" + max + + ", isFixedDelta cannot be false"; + assert currDelta == 0 : min + "==" + max + ", currDelta should be zero"; + fixedDelta = 0; + encoding = EncodingType.DELTA; + return; + } - // populate zigzag encoded literals - long zzEncVal = 0; - if (signed) { - zzEncVal = utils.zigzagEncode(literals[i]); - } else { - zzEncVal = literals[i]; - } - zigzagLiterals[idx] = zzEncVal; - idx++; - - // max delta value is required for computing the fixed bits - // required for delta blob in delta encoding - if (i > 0) { - if (i == 1) { - // first value preserve the sign - adjDeltas[i - 1] = literals[i] - literals[i - 1]; - } else { - adjDeltas[i - 1] = Math.abs(literals[i] - literals[i - 1]); - if (adjDeltas[i - 1] > deltaMax) { - deltaMax = adjDeltas[i - 1]; - } - } - } + if (isFixedDelta) { + assert currDelta == initialDelta + : "currDelta should be equal to initialDelta for fixed delta encoding"; + encoding = EncodingType.DELTA; + fixedDelta = currDelta; + return; } // stores the number of bits required for packing delta blob in // delta encoding bitsDeltaMax = utils.findClosestNumBits(deltaMax); - // if decreasing count equals total number of literals then the - // sequence is monotonically decreasing - if (increasingCount == 1 && decreasingCount == numLiterals) { - isDecreasing = true; - } - - // if increasing count equals total number of literals then the - // sequence is monotonically increasing - if (decreasingCount == 1 && increasingCount == numLiterals) { - isIncreasing = true; + // monotonic condition + if (isIncreasing || isDecreasing) { + encoding = EncodingType.DELTA; + return; } } - // if the sequence is both increasing and decreasing then it is not - // monotonic - if (isDecreasing && isIncreasing) { - isDecreasing = false; - isIncreasing = false; - } - - // fixed delta condition - if (isIncreasing == false && isDecreasing == false && isFixedDelta == true) { - encoding = EncodingType.DELTA; - return; - } - - // monotonic condition - if (isIncreasing || isDecreasing) { - encoding = EncodingType.DELTA; - return; - } + // PATCHED_BASE encoding check // percentile values are computed for the zigzag encoded values. if the // number of bit requirement between 90th and 100th percentile varies // beyond a threshold then we need to patch the values. if the variation - // is not significant then we can use direct or delta encoding - - double p = 0.9; - zzBits90p = utils.percentileBits(zigzagLiterals, 0, numLiterals, p); - - p = 1.0; - zzBits100p = utils.percentileBits(zigzagLiterals, 0, numLiterals, p); + // is not significant then we can use direct encoding + zzBits90p = utils.percentileBits(zigzagLiterals, 0, numLiterals, 0.9); int diffBitsLH = zzBits100p - zzBits90p; // if the difference between 90th percentile and 100th percentile fixed // bits is > 1 then we need patch the values - if (isIncreasing == false && isDecreasing == false && diffBitsLH > 1 - && isFixedDelta == false) { + if (diffBitsLH > 1) { + // patching is done only on base reduced values. 
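Side note: the rewritten determineEncoding() above leans on two small helpers, the zigzag transform hoisted into computeZigZagLiterals() and the overflow guard isSafeSubtract() that this patch adds to SerializationUtils (that hunk follows below). Both can be exercised standalone. A minimal sketch assuming only JDK 8+; the class name and main() harness are illustrative, and the zigzag shown is the standard transform rather than a copy of Hive's utils:

public class EncodingGuards {
  // standard zigzag transform: interleaves signs (0, -1, 1, -2, ... maps to
  // 0, 1, 2, 3, ...) so small magnitudes of either sign pack into few bits
  // for DIRECT encoding
  static long zigzagEncode(long val) {
    return (val << 1) ^ (val >> 63);
  }

  // same predicate as the isSafeSubtract() added by this patch: long
  // subtraction can only overflow when the operand signs differ and the
  // result's sign disagrees with the left operand
  static boolean isSafeSubtract(long left, long right) {
    return (left ^ right) >= 0 | (left ^ (left - right)) >= 0;
  }

  public static void main(String[] args) {
    System.out.println(zigzagEncode(0) + " " + zigzagEncode(-1) + " " + zigzagEncode(1)); // 0 1 2
    long[][] cases = { {Long.MAX_VALUE, -1L}, {Long.MIN_VALUE, 1L}, {100L, 42L} };
    for (long[] c : cases) {
      boolean exact;
      try {
        Math.subtractExact(c[0], c[1]); // throws on overflow
        exact = true;
      } catch (ArithmeticException e) {
        exact = false;
      }
      System.out.println(isSafeSubtract(c[0], c[1]) + " == " + exact);
    }
  }
}

Agreement with Math.subtractExact() on the overflow cases is what lets determineEncoding() bail out to DIRECT before ever computing (max - min).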
// remove base from literals - for(int i = 0; i < numLiterals; i++) { + for (int i = 0; i < numLiterals; i++) { baseRedLiterals[i] = literals[i] - min; } // 95th percentile width is used to determine max allowed value // after which patching will be done - p = 0.95; - brBits95p = utils.percentileBits(baseRedLiterals, 0, numLiterals, p); + brBits95p = utils.percentileBits(baseRedLiterals, 0, numLiterals, 0.95); // 100th percentile is used to compute the max patch width - p = 1.0; - brBits100p = utils.percentileBits(baseRedLiterals, 0, numLiterals, p); + brBits100p = utils.percentileBits(baseRedLiterals, 0, numLiterals, 1.0); // after base reducing the values, if the difference in bits between // 95th percentile and 100th percentile value is zero then there @@ -565,19 +547,24 @@ private void determineEncoding() { encoding = EncodingType.DIRECT; return; } - } - - // if difference in bits between 95th percentile and 100th percentile is - // 0, then patch length will become 0. Hence we will fallback to direct - if (isIncreasing == false && isDecreasing == false && diffBitsLH <= 1 - && isFixedDelta == false) { + } else { + // if difference in bits between 95th percentile and 100th percentile is + // 0, then patch length will become 0. Hence we will fallback to direct encoding = EncodingType.DIRECT; return; } + } - // this should not happen - if (encoding == null) { - throw new RuntimeException("Integer encoding cannot be determined."); + private void computeZigZagLiterals() { + // populate zigzag encoded literals + long zzEncVal = 0; + for (int i = 0; i < numLiterals; i++) { + if (signed) { + zzEncVal = utils.zigzagEncode(literals[i]); + } else { + zzEncVal = literals[i]; + } + zigzagLiterals[i] = zzEncVal; } } @@ -700,7 +687,7 @@ private void clear() { patchWidth = 0; gapVsPatchList = null; min = 0; - isFixedDelta = false; + isFixedDelta = true; } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java index b5380c0..b14fa7b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java @@ -1283,4 +1283,9 @@ private long readLongBE8(InStream in, int rbOffset) { + ((readBuffer[rbOffset + 7] & 255) << 0)); } + // Do not want to use Guava LongMath.checkedSubtract() here as it will throw + // ArithmeticException in case of overflow + public boolean isSafeSubtract(long left, long right) { + return (left ^ right) >= 0 | (left ^ (left - right)) >= 0; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java index c5d80f2..582a5df 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java @@ -13,9 +13,6 @@ */ package org.apache.hadoop.hive.ql.io.parquet.convert; -import java.util.List; - -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.ArrayWritable; import org.apache.hadoop.io.Writable; @@ -33,7 +30,7 @@ private Writable[] mapPairContainer; public ArrayWritableGroupConverter(final GroupType groupType, final HiveGroupConverter parent, - final int index, List hiveSchemaTypeInfos) { + final int index) { this.parent = parent; this.index = index; int count = groupType.getFieldCount(); @@ -43,8 
+40,7 @@ public ArrayWritableGroupConverter(final GroupType groupType, final HiveGroupCon isMap = count == 2; converters = new Converter[count]; for (int i = 0; i < count; i++) { - converters[i] = getConverterFromDescription(groupType.getType(i), i, this, - hiveSchemaTypeInfos); + converters[i] = getConverterFromDescription(groupType.getType(i), i, this); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableGroupConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableGroupConverter.java index 48e4a13..0e310fb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableGroupConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableGroupConverter.java @@ -16,7 +16,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.ArrayWritable; import org.apache.hadoop.io.Writable; @@ -37,21 +36,19 @@ private final Object[] currentArr; private Writable[] rootMap; - public DataWritableGroupConverter(final GroupType requestedSchema, final GroupType tableSchema, - final List hiveSchemaTypeInfos) { - this(requestedSchema, null, 0, tableSchema, hiveSchemaTypeInfos); + public DataWritableGroupConverter(final GroupType requestedSchema, final GroupType tableSchema) { + this(requestedSchema, null, 0, tableSchema); final int fieldCount = tableSchema.getFieldCount(); this.rootMap = new Writable[fieldCount]; } public DataWritableGroupConverter(final GroupType groupType, final HiveGroupConverter parent, - final int index, final List hiveSchemaTypeInfos) { - this(groupType, parent, index, groupType, hiveSchemaTypeInfos); + final int index) { + this(groupType, parent, index, groupType); } public DataWritableGroupConverter(final GroupType selectedGroupType, - final HiveGroupConverter parent, final int index, final GroupType containingGroupType, - final List hiveSchemaTypeInfos) { + final HiveGroupConverter parent, final int index, final GroupType containingGroupType) { this.parent = parent; this.index = index; final int totalFieldCount = containingGroupType.getFieldCount(); @@ -65,8 +62,7 @@ public DataWritableGroupConverter(final GroupType selectedGroupType, Type subtype = selectedFields.get(i); if (containingGroupType.getFields().contains(subtype)) { converters[i] = getConverterFromDescription(subtype, - containingGroupType.getFieldIndex(subtype.getName()), this, - hiveSchemaTypeInfos); + containingGroupType.getFieldIndex(subtype.getName()), this); } else { throw new IllegalStateException("Group type [" + containingGroupType + "] does not contain requested field: " + subtype); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java index 0971a68..5a46136 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java @@ -31,10 +31,8 @@ private final DataWritableGroupConverter root; - public DataWritableRecordConverter(final GroupType requestedSchema, final GroupType tableSchema, - final List hiveColumnTypeInfos) { - this.root = new DataWritableGroupConverter(requestedSchema, tableSchema, - hiveColumnTypeInfos); + public DataWritableRecordConverter(final GroupType requestedSchema, final GroupType tableSchema) { + this.root = new 
DataWritableGroupConverter(requestedSchema, tableSchema); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java index e6fb5ae..bce6400 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java @@ -16,19 +16,12 @@ import java.math.BigDecimal; import java.sql.Timestamp; import java.util.ArrayList; -import java.util.List; -import org.apache.hadoop.hive.common.type.HiveChar; -import org.apache.hadoop.hive.common.type.HiveVarchar; import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTime; import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTimeUtils; -import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -import org.apache.hadoop.hive.serde2.io.HiveCharWritable; import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; -import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable; import org.apache.hadoop.hive.serde2.io.TimestampWritable; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.BooleanWritable; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.FloatWritable; @@ -152,32 +145,6 @@ protected TimestampWritable convert(Binary binary) { } }; } - }, - ECHAR_CONVERTER(HiveCharWritable.class) { - @Override - Converter getConverter(final PrimitiveType type, final int index, final HiveGroupConverter parent) { - return new BinaryConverter(type, parent, index) { - @Override - protected HiveCharWritable convert(Binary binary) { - HiveChar hiveChar = new HiveChar(); - hiveChar.setValue(binary.toStringUsingUTF8()); - return new HiveCharWritable(hiveChar); - } - }; - } - }, - EVARCHAR_CONVERTER(HiveVarcharWritable.class) { - @Override - Converter getConverter(final PrimitiveType type, final int index, final HiveGroupConverter parent) { - return new BinaryConverter(type, parent, index) { - @Override - protected HiveVarcharWritable convert(Binary binary) { - HiveVarchar hiveVarchar = new HiveVarchar(); - hiveVarchar.setValue(binary.toStringUsingUTF8()); - return new HiveVarcharWritable(hiveVarchar); - } - }; - } }; final Class _type; @@ -193,7 +160,7 @@ private ETypeConverter(final Class type) { abstract Converter getConverter(final PrimitiveType type, final int index, final HiveGroupConverter parent); public static Converter getNewConverter(final PrimitiveType type, final int index, - final HiveGroupConverter parent, List hiveSchemaTypeInfos) { + final HiveGroupConverter parent) { if (type.isPrimitive() && (type.asPrimitiveType().getPrimitiveTypeName().equals(PrimitiveType.PrimitiveTypeName.INT96))) { //TODO- cleanup once parquet support Timestamp type annotation. 
return ETypeConverter.ETIMESTAMP_CONVERTER.getConverter(type, index, parent); @@ -201,15 +168,7 @@ public static Converter getNewConverter(final PrimitiveType type, final int inde if (OriginalType.DECIMAL == type.getOriginalType()) { return EDECIMAL_CONVERTER.getConverter(type, index, parent); } else if (OriginalType.UTF8 == type.getOriginalType()) { - if (hiveSchemaTypeInfos.get(index).getTypeName() - .startsWith(serdeConstants.CHAR_TYPE_NAME)) { - return ECHAR_CONVERTER.getConverter(type, index, parent); - } else if (hiveSchemaTypeInfos.get(index).getTypeName() - .startsWith(serdeConstants.VARCHAR_TYPE_NAME)) { - return EVARCHAR_CONVERTER.getConverter(type, index, parent); - } else if (type.isPrimitive()) { - return ESTRING_CONVERTER.getConverter(type, index, parent); - } + return ESTRING_CONVERTER.getConverter(type, index, parent); } Class javaType = type.getPrimitiveTypeName().javaType; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveGroupConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveGroupConverter.java index a364729..78bdf62 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveGroupConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveGroupConverter.java @@ -13,9 +13,6 @@ */ package org.apache.hadoop.hive.ql.io.parquet.convert; -import java.util.List; - -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.Writable; import parquet.io.api.Converter; @@ -26,20 +23,17 @@ public abstract class HiveGroupConverter extends GroupConverter { protected static Converter getConverterFromDescription(final Type type, final int index, - final HiveGroupConverter parent, List hiveSchemaTypeInfos) { + final HiveGroupConverter parent) { if (type == null) { return null; } if (type.isPrimitive()) { - return ETypeConverter.getNewConverter(type.asPrimitiveType(), index, parent, - hiveSchemaTypeInfos); + return ETypeConverter.getNewConverter(type.asPrimitiveType(), index, parent); } else { if (type.asGroupType().getRepetition() == Repetition.REPEATED) { - return new ArrayWritableGroupConverter(type.asGroupType(), parent, index, - hiveSchemaTypeInfos); + return new ArrayWritableGroupConverter(type.asGroupType(), parent, index); } else { - return new DataWritableGroupConverter(type.asGroupType(), parent, index, - hiveSchemaTypeInfos); + return new DataWritableGroupConverter(type.asGroupType(), parent, index); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java index 5e5df57..2ad7330 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java @@ -14,7 +14,6 @@ package org.apache.hadoop.hive.ql.io.parquet.read; import java.util.ArrayList; -import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -24,8 +23,6 @@ import org.apache.hadoop.hive.ql.io.parquet.convert.DataWritableRecordConverter; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.io.ArrayWritable; import org.apache.hadoop.util.StringUtils; @@ -56,7 +53,7 @@ * From a string which columns names (including hive 
column), return a list * of string columns * - * @param comma separated list of columns + * @param columns comma separated list of columns * @return list with virtual columns removed */ private static List getColumns(final String columns) { @@ -64,27 +61,6 @@ removeVirtualColumns(StringUtils.getStringCollection(columns)); } - private static List getColumnTypes(Configuration configuration) { - - List columnNames; - String columnNamesProperty = configuration.get(IOConstants.COLUMNS); - if (columnNamesProperty.length() == 0) { - columnNames = new ArrayList(); - } else { - columnNames = Arrays.asList(columnNamesProperty.split(",")); - } - List columnTypes; - String columnTypesProperty = configuration.get(IOConstants.COLUMNS_TYPES); - if (columnTypesProperty.length() == 0) { - columnTypes = new ArrayList(); - } else { - columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypesProperty); - } - - columnTypes = VirtualColumn.removeVirtualColumnTypes(columnNames, columnTypes); - return columnTypes; - } - /** * * It creates the readContext for Parquet side with the requested schema during the init phase. @@ -173,8 +149,7 @@ } final MessageType tableSchema = resolveSchemaAccess(MessageTypeParser. parseMessageType(metadata.get(HIVE_SCHEMA_KEY)), fileSchema, configuration); - return new DataWritableRecordConverter(readContext.getRequestedSchema(), tableSchema, - getColumnTypes(configuration)); + return new DataWritableRecordConverter(readContext.getRequestedSchema(), tableSchema); } /** @@ -194,4 +169,4 @@ private MessageType resolveSchemaAccess(MessageType requestedSchema, MessageType } return requestedSchema; } -} +} \ No newline at end of file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java index 5f02950..de7d414 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbLockManager.java @@ -20,6 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.thrift.TException; @@ -42,10 +43,10 @@ private static final long MAX_SLEEP = 15000; private HiveLockManagerCtx context; private Set locks; - private HiveMetaStoreClient client; + private IMetaStoreClient client; private long nextSleep = 50; - DbLockManager(HiveMetaStoreClient client) { + DbLockManager(IMetaStoreClient client) { locks = new HashSet(); this.client = client; } @@ -210,8 +211,8 @@ public int hashCode() { /** * Clear the memory of the locks in this object. This won't clear the locks from the database. * It is for use with - * {@link #DbLockManager(org.apache.hadoop.hive.metastore.HiveMetaStoreClient).commitTxn} and - * {@link #DbLockManager(org.apache.hadoop.hive.metastore.HiveMetaStoreClient).rollbackTxn}. + * {@link #DbLockManager(org.apache.hadoop.hive.metastore.IMetaStoreClient).commitTxn} and + * {@link #DbLockManager(org.apache.hadoop.hive.metastore.IMetaStoreClient).rollbackTxn}. 
*/ void clearLocalLockRecords() { locks.clear(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 64c17dd..46b441a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -31,6 +31,8 @@ import org.apache.hadoop.hive.ql.hooks.Entity; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.thrift.TException; @@ -46,7 +48,7 @@ static final private Log LOG = LogFactory.getLog(CLASS_NAME); private DbLockManager lockMgr = null; - private HiveMetaStoreClient client = null; + private IMetaStoreClient client = null; private long txnId = 0; DbTxnManager() { @@ -284,7 +286,7 @@ public void heartbeat() throws LockException { public ValidTxnList getValidTxns() throws LockException { init(); try { - return client.getValidTxns(); + return client.getValidTxns(txnId); } catch (TException e) { throw new LockException(ErrorMsg.METASTORE_COMMUNICATION_FAILED.getMsg(), e); @@ -311,7 +313,6 @@ protected void destruct() { try { if (txnId > 0) rollbackTxn(); if (lockMgr != null) lockMgr.close(); - if (client != null) client.close(); } catch (Exception e) { LOG.error("Caught exception " + e.getClass().getName() + " with message <" + e.getMessage() + ">, swallowing as there is nothing we can do with it."); @@ -326,10 +327,12 @@ private void init() throws LockException { "methods."); } try { - client = new HiveMetaStoreClient(conf); + Hive db = Hive.get(conf); + client = db.getMSC(); } catch (MetaException e) { - throw new LockException(ErrorMsg.METASTORE_COULD_NOT_INITIATE.getMsg(), - e); + throw new LockException(ErrorMsg.METASTORE_COULD_NOT_INITIATE.getMsg(), e); + } catch (HiveException e) { + throw new LockException(ErrorMsg.METASTORE_COULD_NOT_INITIATE.getMsg(), e); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index f40f5f7..cbbe781 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -109,8 +109,8 @@ import org.apache.hadoop.hive.shims.HadoopShims; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.mapred.InputFormat; -import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.StringUtils; import org.apache.thrift.TException; import com.google.common.collect.Sets; @@ -378,6 +378,27 @@ public void createTable(String tableName, List columns, List partCols, Class fileInputFormat, Class fileOutputFormat, int bucketCount, List bucketCols) throws HiveException { + createTable(tableName, columns, partCols, fileInputFormat, fileOutputFormat, bucketCount, + bucketCols, null); + } + + /** + * Create table metadata and the directory for the table data + * @param tableName table name + * @param columns list of fields of the table + * @param partCols partition keys of the table + * @param fileInputFormat Class of the input format of the table data file + * @param fileOutputFormat Class of the output format of the table data file + * @param bucketCount number of buckets that each partition (or the table itself) should be + * divided into + * @param
bucketCols Bucket columns + @param parameters Parameters for the table + @throws HiveException + */ + public void createTable(String tableName, List columns, List partCols, + Class fileInputFormat, + Class fileOutputFormat, int bucketCount, List bucketCols, + Map parameters) throws HiveException { if (columns == null) { throw new HiveException("columns not specified for table " + tableName); } @@ -402,6 +423,9 @@ public void createTable(String tableName, List columns, tbl.setSerializationLib(LazySimpleSerDe.class.getName()); tbl.setNumBuckets(bucketCount); tbl.setBucketCols(bucketCols); + if (parameters != null) { + tbl.setParameters(parameters); + } createTable(tbl); } @@ -427,9 +451,9 @@ public void alterTable(String tblName, Table newTbl) newTbl.checkValidity(); getMSC().alter_table(names[0], names[1], newTbl.getTTable()); } catch (MetaException e) { - throw new HiveException("Unable to alter table.", e); + throw new HiveException("Unable to alter table. " + e.getMessage(), e); } catch (TException e) { - throw new HiveException("Unable to alter table.", e); + throw new HiveException("Unable to alter table. " + e.getMessage(), e); } } @@ -455,9 +479,9 @@ public void alterIndex(String dbName, String baseTblName, String idxName, Index try { getMSC().alter_index(dbName, baseTblName, idxName, newIdx); } catch (MetaException e) { - throw new HiveException("Unable to alter index.", e); + throw new HiveException("Unable to alter index. " + e.getMessage(), e); } catch (TException e) { - throw new HiveException("Unable to alter index.", e); + throw new HiveException("Unable to alter index. " + e.getMessage(), e); } } @@ -502,9 +526,9 @@ public void alterPartition(String dbName, String tblName, Partition newPart) getMSC().alter_partition(dbName, tblName, newPart.getTPartition()); } catch (MetaException e) { - throw new HiveException("Unable to alter partition.", e); + throw new HiveException("Unable to alter partition. " + e.getMessage(), e); } catch (TException e) { - throw new HiveException("Unable to alter partition.", e); + throw new HiveException("Unable to alter partition. " + e.getMessage(), e); } } @@ -534,9 +558,9 @@ public void alterPartitions(String tblName, List newParts) } getMSC().alter_partitions(names[0], names[1], newTParts); } catch (MetaException e) { - throw new HiveException("Unable to alter partition.", e); + throw new HiveException("Unable to alter partition. " + e.getMessage(), e); } catch (TException e) { - throw new HiveException("Unable to alter partition.", e); + throw new HiveException("Unable to alter partition. " + e.getMessage(), e); } } /** @@ -578,11 +602,11 @@ public void renamePartition(Table tbl, Map oldPartSpec, Partitio newPart.getTPartition()); } catch (InvalidOperationException e){ - throw new HiveException("Unable to rename partition.", e); + throw new HiveException("Unable to rename partition. " + e.getMessage(), e); } catch (MetaException e) { - throw new HiveException("Unable to rename partition.", e); + throw new HiveException("Unable to rename partition. " + e.getMessage(), e); } catch (TException e) { - throw new HiveException("Unable to rename partition.", e); + throw new HiveException("Unable to rename partition. " + e.getMessage(), e); } } @@ -591,11 +615,11 @@ public void alterDatabase(String dbName, Database db) try { getMSC().alterDatabase(dbName, db); } catch (MetaException e) { - throw new HiveException("Unable to alter database " + dbName, e); + throw new HiveException("Unable to alter database " + dbName + ". 
" + e.getMessage(), e); } catch (NoSuchObjectException e) { throw new HiveException("Database " + dbName + " does not exists.", e); } catch (TException e) { - throw new HiveException("Unable to alter database " + dbName, e); + throw new HiveException("Unable to alter database " + dbName + ". " + e.getMessage(), e); } } /** @@ -870,14 +894,31 @@ public boolean dropIndex(String db_name, String tbl_name, String index_name, boo try { return getMSC().dropIndex(db_name, tbl_name, index_name, deleteData); } catch (NoSuchObjectException e) { - throw new HiveException("Partition or table doesn't exist.", e); + throw new HiveException("Partition or table doesn't exist. " + e.getMessage(), e); } catch (Exception e) { - throw new HiveException("Unknown error. Please check logs.", e); + throw new HiveException(e.getMessage(), e); } } /** * Drops table along with the data in it. If the table doesn't exist then it + * is a no-op. If ifPurge option is specified it is passed to the + * hdfs command that removes table data from warehouse to make it skip trash. + * + * @param tableName + * table to drop + * @param ifPurge + * completely purge the table (skipping trash) while removing data from warehouse + * @throws HiveException + * thrown if the drop fails + */ + public void dropTable(String tableName, boolean ifPurge) throws HiveException { + String[] names = Utilities.getDbTableName(tableName); + dropTable(names[0], names[1], true, true, ifPurge); + } + + /** + * Drops table along with the data in it. If the table doesn't exist then it * is a no-op * * @param tableName @@ -886,8 +927,7 @@ public boolean dropIndex(String db_name, String tbl_name, String index_name, boo * thrown if the drop fails */ public void dropTable(String tableName) throws HiveException { - String[] names = Utilities.getDbTableName(tableName); - dropTable(names[0], names[1], true, true); + dropTable(tableName, false); } /** @@ -902,7 +942,7 @@ public void dropTable(String tableName) throws HiveException { * thrown if the drop fails */ public void dropTable(String dbName, String tableName) throws HiveException { - dropTable(dbName, tableName, true, true); + dropTable(dbName, tableName, true, true, false); } /** @@ -913,14 +953,31 @@ public void dropTable(String dbName, String tableName) throws HiveException { * @param deleteData * deletes the underlying data along with metadata * @param ignoreUnknownTab - * an exception if thrown if this is falser and table doesn't exist + * an exception is thrown if this is false and the table doesn't exist * @throws HiveException */ public void dropTable(String dbName, String tableName, boolean deleteData, boolean ignoreUnknownTab) throws HiveException { + dropTable(dbName, tableName, deleteData, ignoreUnknownTab, false); + } + /** + * Drops the table. 
+ * + * @param dbName + * @param tableName + * @param deleteData + * deletes the underlying data along with metadata + * @param ignoreUnknownTab + * an exception is thrown if this is false and the table doesn't exist + * @param ifPurge + * completely purge the table skipping trash while removing data from warehouse + * @throws HiveException + */ + public void dropTable(String dbName, String tableName, boolean deleteData, + boolean ignoreUnknownTab, boolean ifPurge) throws HiveException { try { - getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab); + getMSC().dropTable(dbName, tableName, deleteData, ignoreUnknownTab, ifPurge); } catch (NoSuchObjectException e) { if (!ignoreUnknownTab) { throw new HiveException(e); @@ -1008,7 +1065,7 @@ public Table getTable(final String dbName, final String tableName, } return null; } catch (Exception e) { - throw new HiveException("Unable to fetch table " + tableName, e); + throw new HiveException("Unable to fetch table " + tableName + ". " + e.getMessage(), e); } // For non-views, we need to do some extra fixes @@ -1204,6 +1261,15 @@ public Database getDatabaseCurrent() throws HiveException { return getDatabase(currentDb); } + public void loadPartition(Path loadPath, String tableName, + Map partSpec, boolean replace, boolean holdDDLTime, + boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, + boolean isSrcLocal, boolean isAcid) throws HiveException { + Table tbl = getTable(tableName); + loadPartition(loadPath, tbl, partSpec, replace, holdDDLTime, inheritTableSpecs, + isSkewedStoreAsSubdir, isSrcLocal, isAcid); + } + /** * Load a directory into a Hive Table Partition - Alters existing content of * the partition with the contents of loadPath. - If the partition does not @@ -1212,7 +1278,7 @@ public Database getDatabaseCurrent() throws HiveException { * * @param loadPath * Directory containing files to load into Table - * @param tableName + * @param tbl * table to be loaded. * @param partSpec * defines which partition needs to be loaded @@ -1225,12 +1291,12 @@ public Database getDatabaseCurrent() throws HiveException { * @param isSrcLocal * If the source directory is LOCAL */ - public void loadPartition(Path loadPath, String tableName, + public Partition loadPartition(Path loadPath, Table tbl, Map partSpec, boolean replace, boolean holdDDLTime, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, boolean isSrcLocal, boolean isAcid) throws HiveException { - Table tbl = getTable(tableName); Path tblDataLocationPath = tbl.getDataLocation(); + Partition newTPart = null; try { /** * Move files before creating the partition since down stream processes @@ -1279,10 +1345,10 @@ public void loadPartition(Path loadPath, String tableName, Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid); } + boolean forceCreate = !holdDDLTime; + newTPart = getPartition(tbl, partSpec, forceCreate, newPartPath.toString(), inheritTableSpecs); // recreate the partition if it existed before if (!holdDDLTime) { - Partition newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(), - inheritTableSpecs); if (isSkewedStoreAsSubdir) { org.apache.hadoop.hive.metastore.api.Partition newCreatedTpart = newTPart.getTPartition(); SkewedInfo skewedInfo = newCreatedTpart.getSd().getSkewedInfo(); @@ -1292,9 +1358,9 @@ public void loadPartition(Path loadPath, String tableName, /* Add list bucketing location mappings.
*/ skewedInfo.setSkewedColValueLocationMaps(skewedColValueLocationMaps); newCreatedTpart.getSd().setSkewedInfo(skewedInfo); - alterPartition(tbl.getTableName(), new Partition(tbl, newCreatedTpart)); + alterPartition(tbl.getDbName(), tbl.getTableName(), new Partition(tbl, newCreatedTpart)); newTPart = getPartition(tbl, partSpec, true, newPartPath.toString(), inheritTableSpecs); - newCreatedTpart = newTPart.getTPartition(); + return new Partition(tbl, newCreatedTpart); } } } catch (IOException e) { @@ -1307,7 +1373,7 @@ public void loadPartition(Path loadPath, String tableName, LOG.error(StringUtils.stringifyException(e)); throw new HiveException(e); } - + return newTPart; } /** @@ -1403,18 +1469,18 @@ private void constructOneLBLocationMap(FileStatus fSta, * @param replace * @param numDP number of dynamic partitions * @param holdDDLTime - * @return a list of strings with the dynamic partition paths + * @return partition map details (PartitionSpec and Partition) * @throws HiveException */ - public ArrayList> loadDynamicPartitions(Path loadPath, + public Map, Partition> loadDynamicPartitions(Path loadPath, String tableName, Map partSpec, boolean replace, int numDP, boolean holdDDLTime, boolean listBucketingEnabled, boolean isAcid) throws HiveException { Set validPartitions = new HashSet(); try { - ArrayList> fullPartSpecs = - new ArrayList>(); + Map, Partition> partitionsMap = new + LinkedHashMap, Partition>(); FileSystem fs = loadPath.getFileSystem(conf); FileStatus[] leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP+1, fs); @@ -1448,6 +1514,7 @@ private void constructOneLBLocationMap(FileStatus fSta, + " to at least " + validPartitions.size() + '.'); } + Table tbl = getTable(tableName); // for each dynamically created DP directory, construct a full partition spec // and load the partition based on that Iterator iter = validPartitions.iterator(); @@ -1460,14 +1527,12 @@ private void constructOneLBLocationMap(FileStatus fSta, // generate a full partition specification LinkedHashMap fullPartSpec = new LinkedHashMap(partSpec); Warehouse.makeSpecFromName(fullPartSpec, partPath); - fullPartSpecs.add(fullPartSpec); - - // finally load the partition -- move the file to the final table address - loadPartition(partPath, tableName, fullPartSpec, replace, holdDDLTime, true, - listBucketingEnabled, false, isAcid); + Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace, + holdDDLTime, true, listBucketingEnabled, false, isAcid); + partitionsMap.put(fullPartSpec, newPartition); LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec); } - return fullPartSpecs; + return partitionsMap; } catch (IOException e) { throw new HiveException(e); } @@ -1500,6 +1565,7 @@ public void loadTable(Path loadPath, String tableName, boolean replace, tbl.replaceFiles(loadPath, isSrcLocal); } else { tbl.copyFiles(loadPath, isSrcLocal, isAcid); + tbl.getParameters().put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, "true"); } try { @@ -1613,17 +1679,6 @@ public Partition getPartition(Table tbl, Map partSpec, return getPartition(tbl, partSpec, forceCreate, null, true); } - private static void clearPartitionStats(org.apache.hadoop.hive.metastore.api.Partition tpart) { - Map tpartParams = tpart.getParameters(); - if (tpartParams == null) { - return; - } - - for (String statType : StatsSetupConst.supportedStats) { - tpartParams.remove(statType); - } - } - /** * Returns partition metadata * @@ -1691,7 +1746,7 @@ public Partition getPartition(Table tbl, Map partSpec, throw 
new HiveException("new partition path should not be null or empty."); } tpart.getSd().setLocation(partPath); - clearPartitionStats(tpart); + tpart.getParameters().put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,"true"); String fullName = tbl.getTableName(); if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) { fullName = tbl.getDbName() + "." + tbl.getTableName(); @@ -1722,7 +1777,7 @@ public boolean dropPartition(String db_name, String tbl_name, } catch (NoSuchObjectException e) { throw new HiveException("Partition or table doesn't exist.", e); } catch (Exception e) { - throw new HiveException("Unknown error. Please check logs.", e); + throw new HiveException(e.getMessage(), e); } } @@ -1736,6 +1791,7 @@ public boolean dropPartition(String db_name, String tbl_name, public List dropPartitions(String dbName, String tblName, List partSpecs, boolean deleteData, boolean ignoreProtection, boolean ifExists) throws HiveException { + //TODO: add support for ifPurge try { Table tbl = getTable(dbName, tblName); List> partExprs = @@ -1750,7 +1806,7 @@ public boolean dropPartition(String db_name, String tbl_name, } catch (NoSuchObjectException e) { throw new HiveException("Partition or table doesn't exist.", e); } catch (Exception e) { - throw new HiveException("Unknown error. Please check logs.", e); + throw new HiveException(e.getMessage(), e); } } @@ -2243,7 +2299,7 @@ public boolean accept(Path p) { result.add(srcToDest); } } catch (IOException e) { - throw new HiveException("checkPaths: filesystem error in check phase", e); + throw new HiveException("checkPaths: filesystem error in check phase. " + e.getMessage(), e); } return result; } @@ -2310,7 +2366,7 @@ public static boolean renameFile(HiveConf conf, Path srcf, Path destf, try { ShimLoader.getHadoopShims().setFullFileStatus(conf, destStatus, fs, destf); } catch (IOException e) { - LOG.warn("Error setting permission of file " + destf + ": "+ StringUtils.stringifyException(e)); + LOG.warn("Error setting permission of file " + destf + ": "+ e.getMessage(), e); } } return success; @@ -2349,7 +2405,7 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf, srcs = srcFs.globStatus(srcf); } catch (IOException e) { LOG.error(StringUtils.stringifyException(e)); - throw new HiveException("addFiles: filesystem error in check phase", e); + throw new HiveException("addFiles: filesystem error in check phase. " + e.getMessage(), e); } if (srcs == null) { LOG.info("No sources specified to move: " + srcf); @@ -2375,7 +2431,7 @@ static protected void copyFiles(HiveConf conf, Path srcf, Path destf, } } } catch (IOException e) { - throw new HiveException("copyFiles: error while moving files!!!", e); + throw new HiveException("copyFiles: error while moving files!!! 
+ e.getMessage(), e); } } @@ -2447,7 +2503,7 @@ private static void moveAcidFiles(FileSystem fs, FileStatus[] stats, Path dst) fs.rename(bucketSrc, bucketDest); } } catch (IOException e) { - throw new HiveException("Error moving acid files", e); + throw new HiveException("Error moving acid files. " + e.getMessage(), e); } } } @@ -2679,7 +2735,7 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws throw new HiveException(e); } } - + public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException { try { return getMSC().setPartitionColumnStatistics(request); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 6d18884..ecd376d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -451,7 +451,11 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo // Delete table data if (deleteData && !MetaStoreUtils.isExternalTable(table)) { try { - getWh().deleteDir(tablePath, true); + boolean ifPurge = false; + if (envContext != null) { + ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge")); + } + getWh().deleteDir(tablePath, true, ifPurge); } catch (Exception err) { LOG.error("Failed to delete temp table directory: " + tablePath, err); // Forgive error diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java index c2e033c..2bbedd3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java @@ -385,6 +385,10 @@ public void setProperty(String name, String value) { tTable.getParameters().put(name, value); } + public void setParameters(Map params) { + tTable.setParameters(params); + } + public String getProperty(String name) { return tTable.getParameters().get(name); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java index 790a92e..b323cb5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java @@ -670,10 +670,15 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object..
cppCtx.getOpToConstantExprs().put(op, constants); foldOperator(op, cppCtx); List colList = op.getConf().getColList(); + List columnNames = op.getConf().getOutputColumnNames(); + Map columnExprMap = op.getColumnExprMap(); if (colList != null) { for (int i = 0; i < colList.size(); i++) { ExprNodeDesc newCol = foldExpr(colList.get(i), constants, cppCtx, op, 0, false); colList.set(i, newCol); + if (columnExprMap != null) { + columnExprMap.put(columnNames.get(i), newCol); + } } LOG.debug("New column list:(" + StringUtils.join(colList, " ") + ")"); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java index 8e9d3cc..7a3280c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.optimizer; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -29,12 +30,17 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator; +import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator; +import org.apache.hadoop.hive.ql.exec.DummyStoreOperator; +import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.GroupByOperator; import org.apache.hadoop.hive.ql.exec.JoinOperator; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.MuxOperator; import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.OperatorFactory; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.exec.TezDummyStoreOperator; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; @@ -42,12 +48,16 @@ import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.DynamicPruningEventDesc; +import org.apache.hadoop.hive.ql.plan.CommonMergeJoinDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.JoinCondDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.MapJoinDesc; import org.apache.hadoop.hive.ql.plan.OpTraits; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.Statistics; +import org.apache.hadoop.util.ReflectionUtils; /** * ConvertJoinMapJoin is an optimization that replaces a common join @@ -60,39 +70,46 @@ static final private Log LOG = LogFactory.getLog(ConvertJoinMapJoin.class.getName()); + @SuppressWarnings("unchecked") @Override - /* - * (non-Javadoc) - * we should ideally not modify the tree we traverse. - * However, since we need to walk the tree at any time when we modify the - * operator, we might as well do it here. - */ - public Object process(Node nd, Stack stack, - NodeProcessorCtx procCtx, Object... nodeOutputs) - throws SemanticException { + /* + * (non-Javadoc) we should ideally not modify the tree we traverse. However, + * since we need to walk the tree at any time when we modify the operator, we + * might as well do it here. + */ + public Object + process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... 
nodeOutputs) + throws SemanticException { OptimizeTezProcContext context = (OptimizeTezProcContext) procCtx; - if (!context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN)) { + JoinOperator joinOp = (JoinOperator) nd; + + if (!context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN) + && !(context.conf.getBoolVar(HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN))) { + // we are just converting to a common merge join operator. The shuffle + // join in map-reduce case. + int pos = 0; // it doesn't matter which position we use in this case. + convertJoinSMBJoin(joinOp, context, pos, 0, false, false); return null; } - JoinOperator joinOp = (JoinOperator) nd; - // if we have traits, and table info is present in the traits, we know the + // if we have traits, and table info is present in the traits, we know the // exact number of buckets. Else choose the largest number of estimated // reducers from the parent operators. int numBuckets = -1; int estimatedBuckets = -1; + TezBucketJoinProcCtx tezBucketJoinProcCtx = new TezBucketJoinProcCtx(context.conf); if (context.conf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ)) { for (Operator parentOp : joinOp.getParentOperators()) { if (parentOp.getOpTraits().getNumBuckets() > 0) { - numBuckets = (numBuckets < parentOp.getOpTraits().getNumBuckets()) ? - parentOp.getOpTraits().getNumBuckets() : numBuckets; + numBuckets = (numBuckets < parentOp.getOpTraits().getNumBuckets()) ? + parentOp.getOpTraits().getNumBuckets() : numBuckets; } if (parentOp instanceof ReduceSinkOperator) { ReduceSinkOperator rs = (ReduceSinkOperator)parentOp; - estimatedBuckets = (estimatedBuckets < rs.getConf().getNumReducers()) ? + estimatedBuckets = (estimatedBuckets < rs.getConf().getNumReducers()) ? rs.getConf().getNumReducers() : estimatedBuckets; } } @@ -107,29 +124,80 @@ public Object process(Node nd, Stack stack, numBuckets = 1; } LOG.info("Estimated number of buckets " + numBuckets); - int mapJoinConversionPos = mapJoinConversionPos(joinOp, context, numBuckets); + int mapJoinConversionPos = getMapJoinConversionPos(joinOp, context, numBuckets); if (mapJoinConversionPos < 0) { - // we cannot convert to bucket map join, we cannot convert to - // map join either based on the size + // we cannot convert to bucket map join, we cannot convert to + // map join either based on the size. Check if we can convert to SMB join. + if (!context.conf.getBoolVar(HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN)) { + convertJoinSMBJoin(joinOp, context, 0, 0, false, false); + return null; + } + Class bigTableMatcherClass = null; + try { + bigTableMatcherClass = + (Class) (Class.forName(HiveConf.getVar( + context.parseContext.getConf(), + HiveConf.ConfVars.HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR))); + } catch (ClassNotFoundException e) { + throw new SemanticException(e.getMessage()); + } + + BigTableSelectorForAutoSMJ bigTableMatcher = + ReflectionUtils.newInstance(bigTableMatcherClass, null); + JoinDesc joinDesc = joinOp.getConf(); + JoinCondDesc[] joinCondns = joinDesc.getConds(); + Set joinCandidates = MapJoinProcessor.getBigTableCandidates(joinCondns); + if (joinCandidates.isEmpty()) { + // This is a full outer join. This can never be a map-join + // of any type. So return false. + return false; + } + mapJoinConversionPos = + bigTableMatcher.getBigTablePosition(context.parseContext, joinOp, joinCandidates); + if (mapJoinConversionPos < 0) { + // contains aliases from sub-query + // we are just converting to a common merge join operator.
The shuffle + // join in map-reduce case. + int pos = 0; // it doesn't matter which position we use in this case. + convertJoinSMBJoin(joinOp, context, pos, 0, false, false); + return null; + } + + if (checkConvertJoinSMBJoin(joinOp, context, mapJoinConversionPos, tezBucketJoinProcCtx)) { + convertJoinSMBJoin(joinOp, context, mapJoinConversionPos, + tezBucketJoinProcCtx.getNumBuckets(), tezBucketJoinProcCtx.isSubQuery(), true); + } else { + // we are just converting to a common merge join operator. The shuffle + // join in map-reduce case. + int pos = 0; // it doesn't matter which position we use in this case. + convertJoinSMBJoin(joinOp, context, pos, 0, false, false); + } return null; } - if (context.conf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ)) { - if (convertJoinBucketMapJoin(joinOp, context, mapJoinConversionPos)) { - return null; + if (numBuckets > 1) { + if (context.conf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ)) { + if (convertJoinBucketMapJoin(joinOp, context, mapJoinConversionPos, tezBucketJoinProcCtx)) { + return null; + } } } LOG.info("Convert to non-bucketed map join"); // check if we can convert to map join no bucket scaling. - mapJoinConversionPos = mapJoinConversionPos(joinOp, context, 1); + mapJoinConversionPos = getMapJoinConversionPos(joinOp, context, 1); if (mapJoinConversionPos < 0) { + // we are just converting to a common merge join operator. The shuffle + // join in map-reduce case. + int pos = 0; // it doesn't matter which position we use in this case. + convertJoinSMBJoin(joinOp, context, pos, 0, false, false); return null; } MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, mapJoinConversionPos); // map join operator by default has no bucket cols - mapJoinOp.setOpTraits(new OpTraits(null, -1)); + mapJoinOp.setOpTraits(new OpTraits(null, -1, null)); + mapJoinOp.setStatistics(joinOp.getStatistics()); // propagate this change till the next RS for (Operator childOp : mapJoinOp.getChildOperators()) { setAllChildrenTraitsToNull(childOp); @@ -138,11 +206,107 @@ public Object process(Node nd, Stack stack, return null; } + // replaces the join operator with a new CommonJoinOperator, removes the + // parent reduce sinks + private void convertJoinSMBJoin(JoinOperator joinOp, OptimizeTezProcContext context, + int mapJoinConversionPos, int numBuckets, boolean isSubQuery, boolean adjustParentsChildren) + throws SemanticException { + ParseContext parseContext = context.parseContext; + MapJoinDesc mapJoinDesc = null; + if (adjustParentsChildren) { + mapJoinDesc = MapJoinProcessor.getMapJoinDesc(context.conf, parseContext.getOpParseCtx(), + joinOp, parseContext.getJoinContext().get(joinOp), mapJoinConversionPos, true); + } else { + JoinDesc joinDesc = joinOp.getConf(); + // retain the original join desc in the map join. 
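[Reviewer note, not part of the patch] The convertJoinSMBJoin code below splices the new CommonMergeJoinOperator into the operator DAG by hand: each parent of the old JoinOperator gets the merge join operator inserted at the same child index, and each child points back at it. A minimal, self-contained sketch of that splice pattern, using a hypothetical Node class rather than Hive's Operator hierarchy:

    import java.util.ArrayList;
    import java.util.List;

    class Node {
      final List<Node> parents = new ArrayList<Node>();
      final List<Node> children = new ArrayList<Node>();

      // Splice 'replacement' into the graph in place of 'victim', preserving positions.
      static void replace(Node victim, Node replacement) {
        for (Node p : victim.parents) {
          p.children.set(p.children.indexOf(victim), replacement);
        }
        for (Node c : victim.children) {
          c.parents.set(c.parents.indexOf(victim), replacement);
        }
        replacement.parents.addAll(victim.parents);
        replacement.children.addAll(victim.children);
      }
    }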
+ mapJoinDesc = + new MapJoinDesc(null, null, joinDesc.getExprs(), null, null, + joinDesc.getOutputColumnNames(), mapJoinConversionPos, joinDesc.getConds(), + joinDesc.getFilters(), joinDesc.getNoOuterJoin(), null); + } + + @SuppressWarnings("unchecked") + CommonMergeJoinOperator mergeJoinOp = + (CommonMergeJoinOperator) OperatorFactory.get(new CommonMergeJoinDesc(numBuckets, + isSubQuery, mapJoinConversionPos, mapJoinDesc)); + OpTraits opTraits = + new OpTraits(joinOp.getOpTraits().getBucketColNames(), numBuckets, joinOp.getOpTraits() + .getSortCols()); + mergeJoinOp.setOpTraits(opTraits); + mergeJoinOp.setStatistics(joinOp.getStatistics()); + + for (Operator parentOp : joinOp.getParentOperators()) { + int pos = parentOp.getChildOperators().indexOf(joinOp); + parentOp.getChildOperators().remove(pos); + parentOp.getChildOperators().add(pos, mergeJoinOp); + } + + for (Operator childOp : joinOp.getChildOperators()) { + int pos = childOp.getParentOperators().indexOf(joinOp); + childOp.getParentOperators().remove(pos); + childOp.getParentOperators().add(pos, mergeJoinOp); + } + + List> childOperators = mergeJoinOp.getChildOperators(); + if (childOperators == null) { + childOperators = new ArrayList>(); + mergeJoinOp.setChildOperators(childOperators); + } + + List> parentOperators = mergeJoinOp.getParentOperators(); + if (parentOperators == null) { + parentOperators = new ArrayList>(); + mergeJoinOp.setParentOperators(parentOperators); + } + + childOperators.clear(); + parentOperators.clear(); + childOperators.addAll(joinOp.getChildOperators()); + parentOperators.addAll(joinOp.getParentOperators()); + mergeJoinOp.getConf().setGenJoinKeys(false); + + if (adjustParentsChildren) { + mergeJoinOp.getConf().setGenJoinKeys(true); + List> newParentOpList = + new ArrayList>(); + for (Operator parentOp : mergeJoinOp.getParentOperators()) { + for (Operator grandParentOp : parentOp.getParentOperators()) { + grandParentOp.getChildOperators().remove(parentOp); + grandParentOp.getChildOperators().add(mergeJoinOp); + newParentOpList.add(grandParentOp); + } + } + mergeJoinOp.getParentOperators().clear(); + mergeJoinOp.getParentOperators().addAll(newParentOpList); + List> parentOps = + new ArrayList>(mergeJoinOp.getParentOperators()); + for (Operator parentOp : parentOps) { + int parentIndex = mergeJoinOp.getParentOperators().indexOf(parentOp); + if (parentIndex == mapJoinConversionPos) { + continue; + } + + // insert the dummy store operator here + DummyStoreOperator dummyStoreOp = new TezDummyStoreOperator(); + dummyStoreOp.setParentOperators(new ArrayList>()); + dummyStoreOp.setChildOperators(new ArrayList>()); + dummyStoreOp.getChildOperators().add(mergeJoinOp); + int index = parentOp.getChildOperators().indexOf(mergeJoinOp); + parentOp.getChildOperators().remove(index); + parentOp.getChildOperators().add(index, dummyStoreOp); + dummyStoreOp.getParentOperators().add(parentOp); + mergeJoinOp.getParentOperators().remove(parentIndex); + mergeJoinOp.getParentOperators().add(parentIndex, dummyStoreOp); + } + } + mergeJoinOp.cloneOriginalParentsList(mergeJoinOp.getParentOperators()); + } + private void setAllChildrenTraitsToNull(Operator currentOp) { if (currentOp instanceof ReduceSinkOperator) { return; } - currentOp.setOpTraits(new OpTraits(null, -1)); + currentOp.setOpTraits(new OpTraits(null, -1, null)); for (Operator childOp : currentOp.getChildOperators()) { if ((childOp instanceof ReduceSinkOperator) || (childOp instanceof GroupByOperator)) { break; @@ -151,28 +315,26 @@ private void 
setAllChildrenTraitsToNull(Operator current } } - private boolean convertJoinBucketMapJoin(JoinOperator joinOp, OptimizeTezProcContext context, - int bigTablePosition) throws SemanticException { - - TezBucketJoinProcCtx tezBucketJoinProcCtx = new TezBucketJoinProcCtx(context.conf); + private boolean convertJoinBucketMapJoin(JoinOperator joinOp, OptimizeTezProcContext context, + int bigTablePosition, TezBucketJoinProcCtx tezBucketJoinProcCtx) throws SemanticException { if (!checkConvertJoinBucketMapJoin(joinOp, context, bigTablePosition, tezBucketJoinProcCtx)) { LOG.info("Check conversion to bucket map join failed."); return false; } - MapJoinOperator mapJoinOp = - convertJoinMapJoin(joinOp, context, bigTablePosition); + MapJoinOperator mapJoinOp = convertJoinMapJoin(joinOp, context, bigTablePosition); MapJoinDesc joinDesc = mapJoinOp.getConf(); joinDesc.setBucketMapJoin(true); // we can set the traits for this join operator OpTraits opTraits = new OpTraits(joinOp.getOpTraits().getBucketColNames(), - tezBucketJoinProcCtx.getNumBuckets()); + tezBucketJoinProcCtx.getNumBuckets(), null); mapJoinOp.setOpTraits(opTraits); + mapJoinOp.setStatistics(joinOp.getStatistics()); setNumberOfBucketsOnChildren(mapJoinOp); - // Once the conversion is done, we can set the partitioner to bucket cols on the small table + // Once the conversion is done, we can set the partitioner to bucket cols on the small table Map bigTableBucketNumMapping = new HashMap(); bigTableBucketNumMapping.put(joinDesc.getBigTableAlias(), tezBucketJoinProcCtx.getNumBuckets()); joinDesc.setBigTableBucketNumMapping(bigTableBucketNumMapping); @@ -182,6 +344,54 @@ private boolean convertJoinBucketMapJoin(JoinOperator joinOp, OptimizeTezProcCon return true; } + /* + * This method tries to convert a join to an SMB. This is done based on + * traits. If the sorted by columns are the same as the join columns then, we + * can convert the join to an SMB. Otherwise retain the bucket map join as it + * is still more efficient than a regular join. + */ + private boolean checkConvertJoinSMBJoin(JoinOperator joinOp, OptimizeTezProcContext context, + int bigTablePosition, TezBucketJoinProcCtx tezBucketJoinProcCtx) throws SemanticException { + + ReduceSinkOperator bigTableRS = + (ReduceSinkOperator) joinOp.getParentOperators().get(bigTablePosition); + int numBuckets = bigTableRS.getParentOperators().get(0).getOpTraits() + .getNumBuckets(); + + // the sort and bucket cols have to match on both sides for this + // transformation of the join operation + for (Operator parentOp : joinOp.getParentOperators()) { + if (!(parentOp instanceof ReduceSinkOperator)) { + // could be mux/demux operators. Currently not supported + LOG.info("Found correlation optimizer operators. 
Cannot convert to SMB at this time."); + return false; + } + ReduceSinkOperator rsOp = (ReduceSinkOperator) parentOp; + if (checkColEquality(rsOp.getParentOperators().get(0).getOpTraits().getSortCols(), rsOp + .getOpTraits().getSortCols(), rsOp.getColumnExprMap(), tezBucketJoinProcCtx) == false) { + LOG.info("We cannot convert to SMB because the sort column names do not match."); + return false; + } + + if (checkColEquality(rsOp.getParentOperators().get(0).getOpTraits().getBucketColNames(), rsOp + .getOpTraits().getBucketColNames(), rsOp.getColumnExprMap(), tezBucketJoinProcCtx) + == false) { + LOG.info("We cannot convert to SMB because bucket column names do not match."); + return false; + } + } + + boolean isSubQuery = false; + if (numBuckets < 0) { + isSubQuery = true; + numBuckets = bigTableRS.getConf().getNumReducers(); + } + tezBucketJoinProcCtx.setNumBuckets(numBuckets); + tezBucketJoinProcCtx.setIsSubQuery(isSubQuery); + LOG.info("We can convert the join to an SMB join."); + return true; + } + private void setNumberOfBucketsOnChildren(Operator currentOp) { int numBuckets = currentOp.getOpTraits().getNumBuckets(); for (Operatorop : currentOp.getChildOperators()) { @@ -193,15 +403,13 @@ private void setNumberOfBucketsOnChildren(Operator curre } /* - * We perform the following checks to see if we can convert to a bucket map join - * 1. If the parent reduce sink of the big table side has the same emit key cols as - * its parent, we can create a bucket map join eliminating the reduce sink. - * 2. If we have the table information, we can check the same way as in Mapreduce to - * determine if we can perform a Bucket Map Join. + * If the parent reduce sink of the big table side has the same emit key cols + * as its parent, we can create a bucket map join eliminating the reduce sink. */ - private boolean checkConvertJoinBucketMapJoin(JoinOperator joinOp, - OptimizeTezProcContext context, int bigTablePosition, - TezBucketJoinProcCtx tezBucketJoinProcCtx) throws SemanticException { + private boolean checkConvertJoinBucketMapJoin(JoinOperator joinOp, + OptimizeTezProcContext context, int bigTablePosition, + TezBucketJoinProcCtx tezBucketJoinProcCtx) + throws SemanticException { // bail on mux-operator because mux operator masks the emit keys of the // constituent reduce sinks if (!(joinOp.getParentOperators().get(0) instanceof ReduceSinkOperator)) { @@ -211,14 +419,41 @@ private boolean checkConvertJoinBucketMapJoin(JoinOperator joinOp, } ReduceSinkOperator rs = (ReduceSinkOperator) joinOp.getParentOperators().get(bigTablePosition); + List> parentColNames = rs.getOpTraits().getBucketColNames(); + Operator parentOfParent = rs.getParentOperators().get(0); + List> grandParentColNames = parentOfParent.getOpTraits().getBucketColNames(); + int numBuckets = parentOfParent.getOpTraits().getNumBuckets(); + // all keys matched. + if (checkColEquality(grandParentColNames, parentColNames, rs.getColumnExprMap(), + tezBucketJoinProcCtx) == false) { + LOG.info("No info available to check for bucket map join. 
Cannot convert"); + return false; + } + /* * this is the case when the big table is a sub-query and is probably - * already bucketed by the join column in say a group by operation + * already bucketed by the join column in say a group by operation */ - List> colNames = rs.getParentOperators().get(0).getOpTraits().getBucketColNames(); - if ((colNames != null) && (colNames.isEmpty() == false)) { - OperatorparentOfParent = rs.getParentOperators().get(0); - for (ListlistBucketCols : parentOfParent.getOpTraits().getBucketColNames()) { + boolean isSubQuery = false; + if (numBuckets < 0) { + isSubQuery = true; + numBuckets = rs.getConf().getNumReducers(); + } + tezBucketJoinProcCtx.setNumBuckets(numBuckets); + tezBucketJoinProcCtx.setIsSubQuery(isSubQuery); + return true; + } + + private boolean checkColEquality(List> grandParentColNames, + List> parentColNames, Map colExprMap, + TezBucketJoinProcCtx tezBucketJoinProcCtx) { + + if ((grandParentColNames == null) || (parentColNames == null)) { + return false; + } + + if ((parentColNames != null) && (parentColNames.isEmpty() == false)) { + for (List listBucketCols : grandParentColNames) { // can happen if this operator does not carry forward the previous bucketing columns // for e.g. another join operator which does not carry one of the sides' key columns if (listBucketCols.isEmpty()) { @@ -226,9 +461,9 @@ private boolean checkConvertJoinBucketMapJoin(JoinOperator joinOp, } int colCount = 0; // parent op is guaranteed to have a single list because it is a reduce sink - for (String colName : rs.getOpTraits().getBucketColNames().get(0)) { + for (String colName : parentColNames.get(0)) { // all columns need to be at least a subset of the parentOfParent's bucket cols - ExprNodeDesc exprNodeDesc = rs.getColumnExprMap().get(colName); + ExprNodeDesc exprNodeDesc = colExprMap.get(colName); if (exprNodeDesc instanceof ExprNodeColumnDesc) { if (((ExprNodeColumnDesc)exprNodeDesc).getColumn().equals(listBucketCols.get(colCount))) { colCount++; @@ -236,32 +471,21 @@ private boolean checkConvertJoinBucketMapJoin(JoinOperator joinOp, break; } } - - if (colCount == rs.getOpTraits().getBucketColNames().get(0).size()) { - // all keys matched. - int numBuckets = parentOfParent.getOpTraits().getNumBuckets(); - boolean isSubQuery = false; - if (numBuckets < 0) { - isSubQuery = true; - numBuckets = rs.getConf().getNumReducers(); - } - tezBucketJoinProcCtx.setNumBuckets(numBuckets); - tezBucketJoinProcCtx.setIsSubQuery(isSubQuery); + + if (colCount == parentColNames.get(0).size()) { return true; } } } return false; } - - LOG.info("No info available to check for bucket map join. Cannot convert"); return false; } - public int mapJoinConversionPos(JoinOperator joinOp, OptimizeTezProcContext context, + public int getMapJoinConversionPos(JoinOperator joinOp, OptimizeTezProcContext context, int buckets) { - Set bigTableCandidateSet = MapJoinProcessor. 
- getBigTableCandidates(joinOp.getConf().getConds()); + Set bigTableCandidateSet = + MapJoinProcessor.getBigTableCandidates(joinOp.getConf().getConds()); long maxSize = context.conf.getLongVar( HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); @@ -287,7 +511,7 @@ public int mapJoinConversionPos(JoinOperator joinOp, OptimizeTezProcContext cont long inputSize = currInputStat.getDataSize(); if ((bigInputStat == null) || ((bigInputStat != null) && - (inputSize > bigInputStat.getDataSize()))) { + (inputSize > bigInputStat.getDataSize()))) { if (bigTableFound) { // cannot convert to map join; we've already chosen a big table @@ -347,9 +571,9 @@ public int mapJoinConversionPos(JoinOperator joinOp, OptimizeTezProcContext cont * for tez. */ - public MapJoinOperator convertJoinMapJoin(JoinOperator joinOp, OptimizeTezProcContext context, + public MapJoinOperator convertJoinMapJoin(JoinOperator joinOp, OptimizeTezProcContext context, int bigTablePosition) throws SemanticException { - // bail on mux operator because currently the mux operator masks the emit keys + // bail on mux operator because currently the mux operator masks the emit keys // of the constituent reduce sinks. for (Operator parentOp : joinOp.getParentOperators()) { if (parentOp instanceof MuxOperator) { @@ -359,12 +583,12 @@ public MapJoinOperator convertJoinMapJoin(JoinOperator joinOp, OptimizeTezProcCo //can safely convert the join to a map join. ParseContext parseContext = context.parseContext; - MapJoinOperator mapJoinOp = MapJoinProcessor. - convertJoinOpMapJoinOp(context.conf, parseContext.getOpParseCtx(), - joinOp, parseContext.getJoinContext().get(joinOp), bigTablePosition, true); + MapJoinOperator mapJoinOp = + MapJoinProcessor.convertJoinOpMapJoinOp(context.conf, parseContext.getOpParseCtx(), joinOp, + parseContext.getJoinContext().get(joinOp), bigTablePosition, true); - Operator parentBigTableOp - = mapJoinOp.getParentOperators().get(bigTablePosition); + Operator parentBigTableOp = + mapJoinOp.getParentOperators().get(bigTablePosition); if (parentBigTableOp instanceof ReduceSinkOperator) { for (Operator p : parentBigTableOp.getParentOperators()) { // we might have generated a dynamic partition operator chain. 
Since @@ -380,11 +604,10 @@ public MapJoinOperator convertJoinMapJoin(JoinOperator joinOp, OptimizeTezProcCo } } mapJoinOp.getParentOperators().remove(bigTablePosition); - if (!(mapJoinOp.getParentOperators().contains( - parentBigTableOp.getParentOperators().get(0)))) { + if (!(mapJoinOp.getParentOperators().contains(parentBigTableOp.getParentOperators().get(0)))) { mapJoinOp.getParentOperators().add(bigTablePosition, parentBigTableOp.getParentOperators().get(0)); - } + } parentBigTableOp.getParentOperators().get(0).removeChild(parentBigTableOp); for (Operator op : mapJoinOp.getParentOperators()) { if (!(op.getChildOperators().contains(mapJoinOp))) { @@ -397,15 +620,31 @@ public MapJoinOperator convertJoinMapJoin(JoinOperator joinOp, OptimizeTezProcCo return mapJoinOp; } - private boolean hasDynamicPartitionBroadcast(Operator op) { - if (op instanceof AppMasterEventOperator && op.getConf() instanceof DynamicPruningEventDesc) { - return true; - } - for (Operator c : op.getChildOperators()) { - if (hasDynamicPartitionBroadcast(c)) { - return true; + private boolean hasDynamicPartitionBroadcast(Operator parent) { + boolean hasDynamicPartitionPruning = false; + + for (Operator op: parent.getChildOperators()) { + while (op != null) { + if (op instanceof AppMasterEventOperator && op.getConf() instanceof DynamicPruningEventDesc) { + // found dynamic partition pruning operator + hasDynamicPartitionPruning = true; + break; + } + + if (op instanceof ReduceSinkOperator || op instanceof FileSinkOperator) { + // crossing reduce sink or file sink means the pruning isn't for this parent. + break; + } + + if (op.getChildOperators().size() != 1) { + // dynamic partition pruning pipeline doesn't have multiple children + break; + } + + op = op.getChildOperators().get(0); } } - return false; + + return hasDynamicPartitionPruning; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 70060fc..ece21ae 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.optimizer; +import com.google.common.collect.Interner; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; @@ -39,8 +40,6 @@ import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.OperatorFactory; import org.apache.hadoop.hive.ql.exec.OperatorUtils; -import org.apache.hadoop.hive.ql.exec.OrcFileMergeOperator; -import org.apache.hadoop.hive.ql.exec.RCFileMergeOperator; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.SMBMapJoinOperator; @@ -101,7 +100,6 @@ import org.apache.hadoop.hive.ql.plan.TezWork; import org.apache.hadoop.hive.ql.stats.StatsFactory; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.mapred.FileInputFormat; import org.apache.hadoop.mapred.InputFormat; import java.io.Serializable; @@ -580,8 +578,6 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set ta } } + public static void internTableDesc(Task task, Interner interner) { + + if (task instanceof ConditionalTask) { + for (Task tsk : ((ConditionalTask) task).getListTasks()) { + internTableDesc(tsk, interner); + } + } else if (task instanceof ExecDriver) { + MapredWork work = (MapredWork) 
task.getWork(); + work.getMapWork().internTable(interner); + } else if (task != null && (task.getWork() instanceof TezWork)) { + TezWork work = (TezWork)task.getWork(); + for (BaseWork w : work.getAllWorkUnsorted()) { + if (w instanceof MapWork) { + ((MapWork)w).internTable(interner); + } + } + } + if (task.getNumChild() > 0) { + for (Task childTask : task.getChildTasks()) { + internTableDesc(childTask, interner); + } + } + } + /** * create a new plan and return. * @@ -1507,7 +1527,7 @@ private static MapWork createMRWorkForMergingFiles (HiveConf conf, * * @param fsInputDesc * @param finalName - * @param inputFormatClass + * @param inputFormatClass * @return MergeWork if table is stored as RCFile or ORCFile, * null otherwise */ @@ -1714,7 +1734,7 @@ public static boolean isMergeRequired(List> mvTasks, HiveConf hco // There are separate configuration parameters to control whether to // merge for a map-only job // or for a map-reduce job - if (currTask.getWork() instanceof MapredWork) { + if (currTask.getWork() instanceof MapredWork) { ReduceWork reduceWork = ((MapredWork) currTask.getWork()).getReduceWork(); boolean mergeMapOnly = hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) && reduceWork == null; @@ -1813,7 +1833,7 @@ public static Path createMoveTask(Task currTask, boolean return Collections.emptyList(); } - public static List getInputPathsForPartialScan(QBParseInfo parseInfo, StringBuffer aggregationKey) + public static List getInputPathsForPartialScan(QBParseInfo parseInfo, StringBuffer aggregationKey) throws SemanticException { List inputPaths = new ArrayList(); switch (parseInfo.getTableSpec().specType) { @@ -1850,6 +1870,7 @@ public static Path createMoveTask(Task currTask, boolean public static Set> findTopOps(Operator startOp, final Class clazz) { final Set> operators = new LinkedHashSet>(); OperatorUtils.iterateParents(startOp, new NodeUtils.Function>() { + @Override public void apply(Operator argument) { if (argument.getNumParent() == 0 && (clazz == null || clazz.isInstance(argument))) { operators.add(argument); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java index 4dfb66e..46dcfaf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java @@ -389,157 +389,8 @@ static MapJoinOperator convertJoinOpMapJoinOp(HiveConf hconf, JoinOperator op, QBJoinTree joinTree, int mapJoinPos, boolean noCheckOuterJoin) throws SemanticException { - JoinDesc desc = op.getConf(); - JoinCondDesc[] condns = desc.getConds(); - Byte[] tagOrder = desc.getTagOrder(); - - // outer join cannot be performed on a table which is being cached - if (!noCheckOuterJoin) { - if (checkMapJoin(mapJoinPos, condns) < 0) { - throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg()); - } - } - - // Walk over all the sources (which are guaranteed to be reduce sink - // operators). - // The join outputs a concatenation of all the inputs. 
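[Reviewer note, not part of the patch] The lines removed from here down to the MapJoinDesc construction are not deleted logic; they move, essentially verbatim, into the new static helper getMapJoinDesc(...) added near the end of this file, so that convertJoinOpMapJoinOp reduces to

    MapJoinDesc mapJoinDescriptor =
        getMapJoinDesc(hconf, opParseCtxMap, op, joinTree, mapJoinPos, noCheckOuterJoin);

and ConvertJoinMapJoin.convertJoinSMBJoin can build the same descriptor without duplicating this code.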
- QBJoinTree leftSrc = joinTree.getJoinSrc(); - List oldReduceSinkParentOps = - new ArrayList(op.getNumParent()); - if (leftSrc != null) { - // assert mapJoinPos == 0; - Operator parentOp = op.getParentOperators().get(0); - assert parentOp.getParentOperators().size() == 1; - oldReduceSinkParentOps.add((ReduceSinkOperator) parentOp); - } - - - byte pos = 0; - for (String src : joinTree.getBaseSrc()) { - if (src != null) { - Operator parentOp = op.getParentOperators().get(pos); - assert parentOp.getParentOperators().size() == 1; - oldReduceSinkParentOps.add((ReduceSinkOperator) parentOp); - } - pos++; - } - - Map colExprMap = op.getColumnExprMap(); - List schema = new ArrayList(op.getSchema().getSignature()); - Map> valueExprs = op.getConf().getExprs(); - Map> newValueExprs = new HashMap>(); - for (Map.Entry> entry : valueExprs.entrySet()) { - byte tag = entry.getKey(); - Operator terminal = oldReduceSinkParentOps.get(tag); - - List values = entry.getValue(); - List newValues = ExprNodeDescUtils.backtrack(values, op, terminal); - newValueExprs.put(tag, newValues); - for (int i = 0; i < schema.size(); i++) { - ColumnInfo column = schema.get(i); - if (column == null) { - continue; - } - ExprNodeDesc expr = colExprMap.get(column.getInternalName()); - int index = ExprNodeDescUtils.indexOf(expr, values); - if (index >= 0) { - colExprMap.put(column.getInternalName(), newValues.get(index)); - schema.set(i, null); - } - } - } - - // rewrite value index for mapjoin - Map valueIndices = new HashMap(); - - // get the join keys from old parent ReduceSink operators - Map> keyExprMap = new HashMap>(); - - // construct valueTableDescs and valueFilteredTableDescs - List valueTableDescs = new ArrayList(); - List valueFilteredTableDescs = new ArrayList(); - int[][] filterMap = desc.getFilterMap(); - for (pos = 0; pos < op.getParentOperators().size(); pos++) { - ReduceSinkOperator inputRS = oldReduceSinkParentOps.get(pos); - List keyCols = inputRS.getConf().getKeyCols(); - List valueCols = newValueExprs.get(pos); - if (pos != mapJoinPos) { - // remove values in key exprs for value table schema - // value expression for hashsink will be modified in LocalMapJoinProcessor - int[] valueIndex = new int[valueCols.size()]; - List valueColsInValueExpr = new ArrayList(); - for (int i = 0; i < valueIndex.length; i++) { - ExprNodeDesc expr = valueCols.get(i); - int kindex = ExprNodeDescUtils.indexOf(expr, keyCols); - if (kindex >= 0) { - valueIndex[i] = kindex; - } else { - valueIndex[i] = -valueColsInValueExpr.size() - 1; - valueColsInValueExpr.add(expr); - } - } - if (needValueIndex(valueIndex)) { - valueIndices.put(pos, valueIndex); - } - valueCols = valueColsInValueExpr; - } - // deep copy expr node desc - List valueFilteredCols = ExprNodeDescUtils.clone(valueCols); - if (filterMap != null && filterMap[pos] != null && pos != mapJoinPos) { - ExprNodeColumnDesc isFilterDesc = new ExprNodeColumnDesc(TypeInfoFactory - .getPrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME), "filter", "filter", false); - valueFilteredCols.add(isFilterDesc); - } - - TableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils - .getFieldSchemasFromColumnList(valueCols, "mapjoinvalue")); - TableDesc valueFilteredTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils - .getFieldSchemasFromColumnList(valueFilteredCols, "mapjoinvalue")); - - valueTableDescs.add(valueTableDesc); - valueFilteredTableDescs.add(valueFilteredTableDesc); - - keyExprMap.put(pos, keyCols); - } - - Map> filters = desc.getFilters(); - Map> newFilters = new 
HashMap>(); - for (Map.Entry> entry : filters.entrySet()) { - byte srcTag = entry.getKey(); - List filter = entry.getValue(); - - Operator terminal = oldReduceSinkParentOps.get(srcTag); - newFilters.put(srcTag, ExprNodeDescUtils.backtrack(filter, op, terminal)); - } - desc.setFilters(filters = newFilters); - - // create dumpfile prefix needed to create descriptor - String dumpFilePrefix = ""; - if( joinTree.getMapAliases() != null ) { - for(String mapAlias : joinTree.getMapAliases()) { - dumpFilePrefix = dumpFilePrefix + mapAlias; - } - dumpFilePrefix = dumpFilePrefix+"-"+PlanUtils.getCountForMapJoinDumpFilePrefix(); - } else { - dumpFilePrefix = "mapfile"+PlanUtils.getCountForMapJoinDumpFilePrefix(); - } - - List keyCols = keyExprMap.get((byte)mapJoinPos); - - List outputColumnNames = op.getConf().getOutputColumnNames(); - TableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(hconf, - PlanUtils.getFieldSchemasFromColumnList(keyCols, MAPJOINKEY_FIELDPREFIX)); - JoinCondDesc[] joinCondns = op.getConf().getConds(); - MapJoinDesc mapJoinDescriptor = new MapJoinDesc(keyExprMap, keyTableDesc, newValueExprs, - valueTableDescs, valueFilteredTableDescs, outputColumnNames, mapJoinPos, joinCondns, - filters, op.getConf().getNoOuterJoin(), dumpFilePrefix); - mapJoinDescriptor.setStatistics(op.getConf().getStatistics()); - mapJoinDescriptor.setTagOrder(tagOrder); - mapJoinDescriptor.setNullSafes(desc.getNullSafes()); - mapJoinDescriptor.setFilterMap(desc.getFilterMap()); - if (!valueIndices.isEmpty()) { - mapJoinDescriptor.setValueIndices(valueIndices); - } + MapJoinDesc mapJoinDescriptor = + getMapJoinDesc(hconf, opParseCtxMap, op, joinTree, mapJoinPos, noCheckOuterJoin); // reduce sink row resolver used to generate map join op RowResolver outputRS = opParseCtxMap.get(op).getRowResolver(); @@ -551,6 +402,7 @@ static MapJoinOperator convertJoinOpMapJoinOp(HiveConf hconf, opParseCtxMap.put(mapJoinOp, ctx); mapJoinOp.getConf().setReversedExprs(op.getConf().getReversedExprs()); + Map colExprMap = op.getColumnExprMap(); mapJoinOp.setColumnExprMap(colExprMap); List> childOps = op.getChildOperators(); @@ -1176,4 +1028,168 @@ public void setpGraphContext(ParseContext pGraphContext) { } } + + public static MapJoinDesc getMapJoinDesc(HiveConf hconf, + LinkedHashMap, OpParseContext> opParseCtxMap, + JoinOperator op, QBJoinTree joinTree, int mapJoinPos, boolean noCheckOuterJoin) throws SemanticException { + JoinDesc desc = op.getConf(); + JoinCondDesc[] condns = desc.getConds(); + Byte[] tagOrder = desc.getTagOrder(); + + // outer join cannot be performed on a table which is being cached + if (!noCheckOuterJoin) { + if (checkMapJoin(mapJoinPos, condns) < 0) { + throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg()); + } + } + + // Walk over all the sources (which are guaranteed to be reduce sink + // operators). + // The join outputs a concatenation of all the inputs. 
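[Reviewer note, not part of the patch] Further below in this helper, value columns that already travel as join keys are dropped from the value schema, and valueIndex records where each original value now lives: a non-negative entry k means "key column k", while a negative entry -(n + 1) means "position n in the trimmed value list". A standalone illustration with plain strings (hypothetical column names, not Hive expression descriptors):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ValueIndexDemo {
      public static void main(String[] args) {
        List<String> keyCols = Arrays.asList("k1", "k2");
        List<String> valueCols = Arrays.asList("k2", "v1", "k1", "v2");

        int[] valueIndex = new int[valueCols.size()];
        List<String> trimmed = new ArrayList<String>();
        for (int i = 0; i < valueIndex.length; i++) {
          int kindex = keyCols.indexOf(valueCols.get(i));
          if (kindex >= 0) {
            valueIndex[i] = kindex;              // already shipped as a key column
          } else {
            valueIndex[i] = -trimmed.size() - 1; // negated 1-based slot in trimmed values
            trimmed.add(valueCols.get(i));
          }
        }
        System.out.println(Arrays.toString(valueIndex)); // [1, -1, 0, -2]
        System.out.println(trimmed);                     // [v1, v2]
      }
    }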
+ QBJoinTree leftSrc = joinTree.getJoinSrc(); + List oldReduceSinkParentOps = + new ArrayList(op.getNumParent()); + if (leftSrc != null) { + // assert mapJoinPos == 0; + Operator parentOp = op.getParentOperators().get(0); + assert parentOp.getParentOperators().size() == 1; + oldReduceSinkParentOps.add((ReduceSinkOperator) parentOp); + } + + byte pos = 0; + for (String src : joinTree.getBaseSrc()) { + if (src != null) { + Operator parentOp = op.getParentOperators().get(pos); + assert parentOp.getParentOperators().size() == 1; + oldReduceSinkParentOps.add((ReduceSinkOperator) parentOp); + } + pos++; + } + + Map colExprMap = op.getColumnExprMap(); + List schema = new ArrayList(op.getSchema().getSignature()); + Map> valueExprs = op.getConf().getExprs(); + Map> newValueExprs = new HashMap>(); + for (Map.Entry> entry : valueExprs.entrySet()) { + byte tag = entry.getKey(); + Operator terminal = oldReduceSinkParentOps.get(tag); + + List values = entry.getValue(); + List newValues = ExprNodeDescUtils.backtrack(values, op, terminal); + newValueExprs.put(tag, newValues); + for (int i = 0; i < schema.size(); i++) { + ColumnInfo column = schema.get(i); + if (column == null) { + continue; + } + ExprNodeDesc expr = colExprMap.get(column.getInternalName()); + int index = ExprNodeDescUtils.indexOf(expr, values); + if (index >= 0) { + colExprMap.put(column.getInternalName(), newValues.get(index)); + schema.set(i, null); + } + } + } + + // rewrite value index for mapjoin + Map valueIndices = new HashMap(); + + // get the join keys from old parent ReduceSink operators + Map> keyExprMap = new HashMap>(); + + // construct valueTableDescs and valueFilteredTableDescs + List valueTableDescs = new ArrayList(); + List valueFilteredTableDescs = new ArrayList(); + int[][] filterMap = desc.getFilterMap(); + for (pos = 0; pos < op.getParentOperators().size(); pos++) { + ReduceSinkOperator inputRS = oldReduceSinkParentOps.get(pos); + List keyCols = inputRS.getConf().getKeyCols(); + List valueCols = newValueExprs.get(pos); + if (pos != mapJoinPos) { + // remove values in key exprs for value table schema + // value expression for hashsink will be modified in + // LocalMapJoinProcessor + int[] valueIndex = new int[valueCols.size()]; + List valueColsInValueExpr = new ArrayList(); + for (int i = 0; i < valueIndex.length; i++) { + ExprNodeDesc expr = valueCols.get(i); + int kindex = ExprNodeDescUtils.indexOf(expr, keyCols); + if (kindex >= 0) { + valueIndex[i] = kindex; + } else { + valueIndex[i] = -valueColsInValueExpr.size() - 1; + valueColsInValueExpr.add(expr); + } + } + if (needValueIndex(valueIndex)) { + valueIndices.put(pos, valueIndex); + } + valueCols = valueColsInValueExpr; + } + // deep copy expr node desc + List valueFilteredCols = ExprNodeDescUtils.clone(valueCols); + if (filterMap != null && filterMap[pos] != null && pos != mapJoinPos) { + ExprNodeColumnDesc isFilterDesc = + new ExprNodeColumnDesc( + TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME), "filter", + "filter", false); + valueFilteredCols.add(isFilterDesc); + } + + TableDesc valueTableDesc = + PlanUtils.getMapJoinValueTableDesc(PlanUtils.getFieldSchemasFromColumnList(valueCols, + "mapjoinvalue")); + TableDesc valueFilteredTableDesc = + PlanUtils.getMapJoinValueTableDesc(PlanUtils.getFieldSchemasFromColumnList( + valueFilteredCols, "mapjoinvalue")); + + valueTableDescs.add(valueTableDesc); + valueFilteredTableDescs.add(valueFilteredTableDesc); + + keyExprMap.put(pos, keyCols); + } + + Map> filters = desc.getFilters(); + Map> 
newFilters = new HashMap>(); + for (Map.Entry> entry : filters.entrySet()) { + byte srcTag = entry.getKey(); + List filter = entry.getValue(); + + Operator terminal = oldReduceSinkParentOps.get(srcTag); + newFilters.put(srcTag, ExprNodeDescUtils.backtrack(filter, op, terminal)); + } + desc.setFilters(filters = newFilters); + + // create dumpfile prefix needed to create descriptor + String dumpFilePrefix = ""; + if (joinTree.getMapAliases() != null) { + for (String mapAlias : joinTree.getMapAliases()) { + dumpFilePrefix = dumpFilePrefix + mapAlias; + } + dumpFilePrefix = dumpFilePrefix + "-" + PlanUtils.getCountForMapJoinDumpFilePrefix(); + } else { + dumpFilePrefix = "mapfile" + PlanUtils.getCountForMapJoinDumpFilePrefix(); + } + + List keyCols = keyExprMap.get((byte) mapJoinPos); + + List outputColumnNames = op.getConf().getOutputColumnNames(); + TableDesc keyTableDesc = + PlanUtils.getMapJoinKeyTableDesc(hconf, + PlanUtils.getFieldSchemasFromColumnList(keyCols, MAPJOINKEY_FIELDPREFIX)); + JoinCondDesc[] joinCondns = op.getConf().getConds(); + MapJoinDesc mapJoinDescriptor = + new MapJoinDesc(keyExprMap, keyTableDesc, newValueExprs, valueTableDescs, + valueFilteredTableDescs, outputColumnNames, mapJoinPos, joinCondns, filters, op + .getConf().getNoOuterJoin(), dumpFilePrefix); + mapJoinDescriptor.setStatistics(op.getConf().getStatistics()); + mapJoinDescriptor.setTagOrder(tagOrder); + mapJoinDescriptor.setNullSafes(desc.getNullSafes()); + mapJoinDescriptor.setFilterMap(desc.getFilterMap()); + if (!valueIndices.isEmpty()) { + mapJoinDescriptor.setValueIndices(valueIndices); + } + + return mapJoinDescriptor; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java new file mode 100644 index 0000000..8516643 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java @@ -0,0 +1,100 @@ +package org.apache.hadoop.hive.ql.optimizer; + +import java.util.HashMap; +import java.util.Map; +import java.util.Stack; + +import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator; +import org.apache.hadoop.hive.ql.exec.DummyStoreOperator; +import org.apache.hadoop.hive.ql.exec.FileSinkOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.parse.GenTezProcContext; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.BaseWork; +import org.apache.hadoop.hive.ql.plan.MergeJoinWork; +import org.apache.hadoop.hive.ql.plan.OperatorDesc; +import org.apache.hadoop.hive.ql.plan.TezEdgeProperty; +import org.apache.hadoop.hive.ql.plan.TezWork; +import org.apache.hadoop.hive.ql.plan.TezWork.VertexType; + +public class MergeJoinProc implements NodeProcessor { + + public Operator getLeafOperator(Operator op) { + for (Operator childOp : op.getChildOperators()) { + // FileSink or ReduceSink operators are used to create vertices. See + // TezCompiler. + if ((childOp instanceof ReduceSinkOperator) || (childOp instanceof FileSinkOperator)) { + return childOp; + } else { + return getLeafOperator(childOp); + } + } + + return null; + } + + @Override + public Object + process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... 
nodeOutputs) + throws SemanticException { + GenTezProcContext context = (GenTezProcContext) procCtx; + CommonMergeJoinOperator mergeJoinOp = (CommonMergeJoinOperator) nd; + if (stack.size() < 2 || !(stack.get(stack.size() - 2) instanceof DummyStoreOperator)) { + context.currentMergeJoinOperator = mergeJoinOp; + return null; + } + + TezWork tezWork = context.currentTask.getWork(); + @SuppressWarnings("unchecked") + Operator parentOp = + (Operator) ((stack.get(stack.size() - 2))); + // Guaranteed to be just 1 because each DummyStoreOperator can be part of only one work. + BaseWork parentWork = context.childToWorkMap.get(parentOp).get(0); + + + // we need to set the merge work that has been created as part of the dummy store walk. If a + // merge work already exists for this merge join operator, add the dummy store work to the + // merge work. Else create a merge work, add above work to the merge work + MergeJoinWork mergeWork = null; + if (context.opMergeJoinWorkMap.containsKey(getLeafOperator(mergeJoinOp))) { + // we already have the merge work corresponding to this merge join operator + mergeWork = context.opMergeJoinWorkMap.get(getLeafOperator(mergeJoinOp)); + } else { + mergeWork = new MergeJoinWork(); + tezWork.add(mergeWork); + context.opMergeJoinWorkMap.put(getLeafOperator(mergeJoinOp), mergeWork); + } + + mergeWork.setMergeJoinOperator(mergeJoinOp); + mergeWork.addMergedWork(null, parentWork); + tezWork.setVertexType(mergeWork, VertexType.MULTI_INPUT_UNINITIALIZED_EDGES); + + for (BaseWork grandParentWork : tezWork.getParents(parentWork)) { + parentWork.setName(grandParentWork.getName()); + TezEdgeProperty edgeProp = tezWork.getEdgeProperty(grandParentWork, parentWork); + tezWork.disconnect(grandParentWork, parentWork); + tezWork.connect(grandParentWork, mergeWork, edgeProp); + } + + for (BaseWork childWork : tezWork.getChildren(parentWork)) { + TezEdgeProperty edgeProp = tezWork.getEdgeProperty(parentWork, childWork); + tezWork.disconnect(parentWork, childWork); + tezWork.connect(mergeWork, childWork, edgeProp); + } + + tezWork.remove(parentWork); + + DummyStoreOperator dummyOp = (DummyStoreOperator) (stack.get(stack.size() - 2)); + + parentWork.setTag(mergeJoinOp.getTagForOperator(dummyOp)); + + mergeJoinOp.getParentOperators().remove(dummyOp); + dummyOp.getChildOperators().clear(); + + return true; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java index a211e3d..186243f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java @@ -51,7 +51,13 @@ * @param hiveConf */ public void initialize(HiveConf hiveConf) { + + boolean isTezExecEngine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez"); + boolean isSparkExecEngine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark"); + boolean bucketMapJoinOptimizer = false; + transformations = new ArrayList(); + // Add the transformation that computes the lineage information. 
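[Reviewer note, not part of the patch] This hunk hoists the repeated execution-engine string comparisons into isTezExecEngine and isSparkExecEngine, which the rest of initialize() reuses. Standalone, the gating amounts to:

    String engine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE);
    boolean isTezExecEngine = engine.equals("tez");
    boolean isSparkExecEngine = engine.equals("spark");

Under Tez, the bucket map join and sorted-merge bucket map join transformations are skipped here, since this patch teaches the Tez path (ConvertJoinMapJoin above) to perform those conversions itself.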
transformations.add(new Generator()); if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD)) { @@ -81,15 +87,16 @@ public void initialize(HiveConf hiveConf) { } transformations.add(new SamplePruner()); transformations.add(new MapJoinProcessor()); - boolean bucketMapJoinOptimizer = false; - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN)) { + + if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN)) && !isTezExecEngine) { transformations.add(new BucketMapJoinOptimizer()); bucketMapJoinOptimizer = true; } // If optimize hive.optimize.bucketmapjoin.sortedmerge is set, add both // BucketMapJoinOptimizer and SortedMergeBucketMapJoinOptimizer - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN)) { + if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN)) + && !isTezExecEngine) { if (!bucketMapJoinOptimizer) { // No need to add BucketMapJoinOptimizer twice transformations.add(new BucketMapJoinOptimizer()); @@ -119,7 +126,7 @@ public void initialize(HiveConf hiveConf) { if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCORRELATION) && !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEGROUPBYSKEW) && !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME) && - !HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { + !isTezExecEngine) { transformations.add(new CorrelationOptimizer()); } if (HiveConf.getFloatVar(hiveConf, HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE) > 0) { @@ -128,8 +135,7 @@ public void initialize(HiveConf hiveConf) { if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES)) { transformations.add(new StatsOptimizer()); } - String execEngine = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE); - if ((pctx.getContext().getExplain() || "spark".equals(execEngine)) && !"tez".equals(execEngine)) { + if (pctx.getContext().getExplain() && !isSparkExecEngine && !isTezExecEngine) { transformations.add(new AnnotateWithStatistics()); transformations.add(new AnnotateWithOpTraits()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java index eeef609..65fb66e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ReduceSinkMapJoinProc.java @@ -52,6 +52,7 @@ import org.apache.hadoop.hive.ql.plan.TezEdgeProperty; import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType; import org.apache.hadoop.hive.ql.plan.TezWork; +import org.apache.hadoop.hive.ql.plan.TezWork.VertexType; import org.apache.hadoop.hive.ql.stats.StatsUtils; public class ReduceSinkMapJoinProc implements NodeProcessor { @@ -183,7 +184,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procContext, TezWork tezWork = context.currentTask.getWork(); LOG.debug("connecting "+parentWork.getName()+" with "+myWork.getName()); tezWork.connect(parentWork, myWork, edgeProp); - + if (edgeType == EdgeType.CUSTOM_EDGE) { + tezWork.setVertexType(myWork, VertexType.INITIALIZED_EDGES); + } + ReduceSinkOperator r = null; if (parentRS.getConf().getOutputName() != null) { LOG.debug("Cloning reduce sink for multi-child broadcast edge"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java index 
c856623..906dadf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java @@ -44,9 +44,9 @@ import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.io.ContentSummaryInputFormat; import org.apache.hadoop.hive.ql.io.HiveInputFormat; -import org.apache.hadoop.hive.ql.metadata.InputEstimator; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; +import org.apache.hadoop.hive.ql.metadata.InputEstimator; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; @@ -55,13 +55,25 @@ import org.apache.hadoop.hive.ql.parse.QB; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.SplitSample; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.FetchWork; import org.apache.hadoop.hive.ql.plan.ListSinkDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; +import org.apache.hadoop.hive.ql.plan.SelectDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToBinary; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToChar; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDate; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDecimal; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUnixTimeStamp; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUtcTimestamp; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToVarchar; import org.apache.hadoop.mapred.InputFormat; import org.apache.hadoop.mapred.JobConf; @@ -73,9 +85,11 @@ private final Log LOG = LogFactory.getLog(SimpleFetchOptimizer.class.getName()); + @Override public ParseContext transform(ParseContext pctx) throws SemanticException { Map> topOps = pctx.getTopOps(); - if (pctx.getQB().isSimpleSelectQuery() && topOps.size() == 1) { + if (pctx.getQB().getIsQuery() && !pctx.getQB().getParseInfo().isAnalyzeCommand() + && topOps.size() == 1) { // no join, no groupby, no distinct, no lateral view, no subq, // no CTAS or insert, not analyze command, and single sourced. String alias = (String) pctx.getTopOps().keySet().toArray()[0]; @@ -144,7 +158,7 @@ private boolean checkThreshold(FetchData data, int limit, ParseContext pctx) thr // for non-aggressive mode (minimal) // 1. samping is not allowed // 2. for partitioned table, all filters should be targeted to partition column - // 3. SelectOperator should be select star + // 3. 
SelectOperator should use only simple cast/column access private FetchData checkTree(boolean aggressive, ParseContext pctx, String alias, TableScanOperator ts) throws HiveException { SplitSample splitSample = pctx.getNameToSplitSample().get(alias); @@ -156,7 +170,7 @@ private FetchData checkTree(boolean aggressive, ParseContext pctx, String alias, return null; } - Table table = qb.getMetaData().getAliasToTable().get(alias); + Table table = pctx.getTopToTable().get(ts); if (table == null) { return null; } @@ -181,34 +195,71 @@ private FetchData checkTree(boolean aggressive, ParseContext pctx, String alias, return null; } - private FetchData checkOperators(FetchData fetch, TableScanOperator ts, boolean aggresive, + private FetchData checkOperators(FetchData fetch, TableScanOperator ts, boolean aggressive, boolean bypassFilter) { if (ts.getChildOperators().size() != 1) { return null; } Operator op = ts.getChildOperators().get(0); for (; ; op = op.getChildOperators().get(0)) { - if (aggresive) { - if (!(op instanceof LimitOperator || op instanceof FilterOperator - || op instanceof SelectOperator)) { + if (op instanceof SelectOperator) { + if (!aggressive) { + if (!checkExpressions((SelectOperator) op)) { + break; + } + } + continue; + } + + if (aggressive) { + if (!(op instanceof LimitOperator || op instanceof FilterOperator)) { break; } - } else if (!(op instanceof LimitOperator || (op instanceof FilterOperator && bypassFilter) - || (op instanceof SelectOperator && ((SelectOperator) op).getConf().isSelectStar()))) { + } else if (!(op instanceof LimitOperator || (op instanceof FilterOperator && bypassFilter))) { break; } + if (op.getChildOperators() == null || op.getChildOperators().size() != 1) { return null; } } + if (op instanceof FileSinkOperator) { fetch.scanOp = ts; fetch.fileSink = op; return fetch; } + return null; } + private boolean checkExpressions(SelectOperator op) { + SelectDesc desc = op.getConf(); + for (ExprNodeDesc expr : desc.getColList()) { + if (!checkExpression(expr)) { + return false; + } + } + return true; + } + + private boolean checkExpression(ExprNodeDesc expr) { + if (expr instanceof ExprNodeConstantDesc || expr instanceof ExprNodeColumnDesc) { + return true; + } + + if (expr instanceof ExprNodeGenericFuncDesc) { + GenericUDF udf = ((ExprNodeGenericFuncDesc) expr).getGenericUDF(); + if (udf instanceof GenericUDFToBinary || udf instanceof GenericUDFToChar + || udf instanceof GenericUDFToDate || udf instanceof GenericUDFToDecimal + || udf instanceof GenericUDFToUnixTimeStamp || udf instanceof GenericUDFToUtcTimestamp + || udf instanceof GenericUDFToVarchar) { + return expr.getChildren().size() == 1 && checkExpression(expr.getChildren().get(0)); + } + } + return false; + } + private class FetchData { private final ReadEntity parent; @@ -240,7 +291,7 @@ private FetchData(ReadEntity parent, Table table, PrunedPartitionList partsList, this.splitSample = splitSample; this.onlyPruningFilter = bypassFilter; } - + /* * all filters were executed during partition pruning */ @@ -251,7 +302,7 @@ public boolean hasOnlyPruningFilter() { private FetchWork convertToWork() throws HiveException { inputs.clear(); if (!table.isPartitioned()) { - inputs.add(new ReadEntity(table, parent)); + inputs.add(new ReadEntity(table, parent, parent == null)); FetchWork work = new FetchWork(table.getPath(), Utilities.getTableDesc(table)); PlanUtils.configureInputJobPropertiesForStorageHandler(work.getTblDesc()); work.setSplitSample(splitSample); @@ -261,12 +312,12 @@ private FetchWork convertToWork() 
throws HiveException { List partP = new ArrayList(); for (Partition partition : partsList.getNotDeniedPartns()) { - inputs.add(new ReadEntity(partition, parent)); + inputs.add(new ReadEntity(partition, parent, parent == null)); listP.add(partition.getDataLocation()); partP.add(Utilities.getPartitionDesc(partition)); } Table sourceTable = partsList.getSourceTable(); - inputs.add(new ReadEntity(sourceTable, parent)); + inputs.add(new ReadEntity(sourceTable, parent, parent == null)); TableDesc table = Utilities.getTableDesc(sourceTable); FetchWork work = new FetchWork(listP, partP, table); if (!work.getPartDesc().isEmpty()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java index cbb59ae..d79879c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java @@ -71,7 +71,6 @@ import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.io.IntWritable; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -85,6 +84,7 @@ */ public class SortedDynPartitionOptimizer implements Transform { + private static final String BUCKET_NUMBER_COL_NAME = "_bucket_number"; @Override public ParseContext transform(ParseContext pCtx) throws SemanticException { @@ -216,6 +216,13 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, ReduceSinkDesc rsConf = getReduceSinkDesc(partitionPositions, sortPositions, sortOrder, newValueCols, bucketColumns, numBuckets, fsParent, fsOp.getConf().getWriteType()); + if (!bucketColumns.isEmpty()) { + String tableAlias = outRR.getColumnInfos().get(0).getTabAlias(); + ColumnInfo ci = new ColumnInfo(BUCKET_NUMBER_COL_NAME, TypeInfoFactory.stringTypeInfo, + tableAlias, true, true); + outRR.put(tableAlias, BUCKET_NUMBER_COL_NAME, ci); + } + // Create ReduceSink operator ReduceSinkOperator rsOp = (ReduceSinkOperator) putOpInsertMap( OperatorFactory.getAndMakeChild(rsConf, new RowSchema(outRR.getColumnInfos()), fsParent), @@ -380,8 +387,11 @@ public ReduceSinkDesc getReduceSinkDesc(List partitionPositions, // corresponding with bucket number and hence their OIs for (Integer idx : keyColsPosInVal) { if (idx < 0) { - newKeyCols.add(new ExprNodeConstantDesc(TypeInfoFactory - .getPrimitiveTypeInfoFromPrimitiveWritable(IntWritable.class), -1)); + // add bucket number column to both key and value + ExprNodeConstantDesc encd = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, + BUCKET_NUMBER_COL_NAME); + newKeyCols.add(encd); + newValueCols.add(encd); } else { newKeyCols.add(newValueCols.get(idx).clone()); } @@ -395,7 +405,8 @@ public ReduceSinkDesc getReduceSinkDesc(List partitionPositions, // should honor the ordering of records provided by ORDER BY in SELECT statement ReduceSinkOperator parentRSOp = OperatorUtils.findSingleOperatorUpstream(parent, ReduceSinkOperator.class); - if (parentRSOp != null) { + boolean isOrderBy = parseCtx.getQB().getParseInfo().getDestToOrderBy().size() > 0; + if (parentRSOp != null && isOrderBy) { String parentRSOpOrder = parentRSOp.getConf().getOrder(); if (parentRSOpOrder != null && !parentRSOpOrder.isEmpty() && sortPositions.isEmpty()) { newKeyCols.addAll(parentRSOp.getConf().getKeyCols()); @@ -417,6 +428,9 @@ public ReduceSinkDesc 
getReduceSinkDesc(List partitionPositions, List outCols = Utilities.getInternalColumnNamesFromSignature(parent.getSchema() .getSignature()); ArrayList outValColNames = Lists.newArrayList(outCols); + if (!bucketColumns.isEmpty()) { + outValColNames.add(BUCKET_NUMBER_COL_NAME); + } List valFields = PlanUtils.getFieldSchemasFromColumnList(newValueCols, outValColNames, 0, ""); TableDesc valueTable = PlanUtils.getReduceValueTableDesc(valFields); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java index 1c959e3..a8cb3c1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/OpTraitsRulesProcFactory.java @@ -23,6 +23,7 @@ import java.util.Map.Entry; import java.util.Stack; +import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.exec.GroupByOperator; import org.apache.hadoop.hive.ql.exec.JoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; @@ -104,7 +105,12 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, List> listBucketCols = new ArrayList>(); listBucketCols.add(bucketCols); - OpTraits opTraits = new OpTraits(listBucketCols, -1); + int numBuckets = -1; + OpTraits parentOpTraits = rs.getParentOperators().get(0).getConf().getOpTraits(); + if (parentOpTraits != null) { + numBuckets = parentOpTraits.getNumBuckets(); + } + OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listBucketCols); rs.setOpTraits(opTraits); return null; } @@ -163,15 +169,21 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } catch (HiveException e) { prunedPartList = null; } - boolean bucketMapJoinConvertible = checkBucketedTable(table, + boolean isBucketed = checkBucketedTable(table, opTraitsCtx.getParseContext(), prunedPartList); - List>bucketCols = new ArrayList>(); + List> bucketColsList = new ArrayList>(); + List> sortedColsList = new ArrayList>(); int numBuckets = -1; - if (bucketMapJoinConvertible) { - bucketCols.add(table.getBucketCols()); + if (isBucketed) { + bucketColsList.add(table.getBucketCols()); numBuckets = table.getNumBuckets(); + List sortCols = new ArrayList(); + for (Order colSortOrder : table.getSortCols()) { + sortCols.add(colSortOrder.getCol()); + } + sortedColsList.add(sortCols); } - OpTraits opTraits = new OpTraits(bucketCols, numBuckets); + OpTraits opTraits = new OpTraits(bucketColsList, numBuckets, sortedColsList); ts.setOpTraits(opTraits); return null; } @@ -197,7 +209,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, List> listBucketCols = new ArrayList>(); listBucketCols.add(gbyKeys); - OpTraits opTraits = new OpTraits(listBucketCols, -1); + OpTraits opTraits = new OpTraits(listBucketCols, -1, listBucketCols); gbyOp.setOpTraits(opTraits); return null; } @@ -205,22 +217,17 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, public static class SelectRule implements NodeProcessor { - @Override - public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, - Object... 
nodeOutputs) throws SemanticException { - SelectOperator selOp = (SelectOperator)nd; - List> parentBucketColNames = - selOp.getParentOperators().get(0).getOpTraits().getBucketColNames(); - + public List> getConvertedColNames(List> parentColNames, + SelectOperator selOp) { List> listBucketCols = new ArrayList>(); if (selOp.getColumnExprMap() != null) { - if (parentBucketColNames != null) { - for (List colNames : parentBucketColNames) { + if (parentColNames != null) { + for (List colNames : parentColNames) { List bucketColNames = new ArrayList(); for (String colName : colNames) { for (Entry entry : selOp.getColumnExprMap().entrySet()) { if (entry.getValue() instanceof ExprNodeColumnDesc) { - if(((ExprNodeColumnDesc)(entry.getValue())).getColumn().equals(colName)) { + if (((ExprNodeColumnDesc) (entry.getValue())).getColumn().equals(colName)) { bucketColNames.add(entry.getKey()); } } @@ -231,11 +238,34 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } } + return listBucketCols; + } + + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + SelectOperator selOp = (SelectOperator)nd; + List> parentBucketColNames = + selOp.getParentOperators().get(0).getOpTraits().getBucketColNames(); + + List> listBucketCols = null; + List> listSortCols = null; + if (selOp.getColumnExprMap() != null) { + if (parentBucketColNames != null) { + listBucketCols = getConvertedColNames(parentBucketColNames, selOp); + } + List> parentSortColNames = selOp.getParentOperators().get(0).getOpTraits() + .getSortCols(); + if (parentSortColNames != null) { + listSortCols = getConvertedColNames(parentSortColNames, selOp); + } + } + int numBuckets = -1; if (selOp.getParentOperators().get(0).getOpTraits() != null) { numBuckets = selOp.getParentOperators().get(0).getOpTraits().getNumBuckets(); } - OpTraits opTraits = new OpTraits(listBucketCols, numBuckets); + OpTraits opTraits = new OpTraits(listBucketCols, numBuckets, listSortCols); selOp.setOpTraits(opTraits); return null; } @@ -248,6 +278,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { JoinOperator joinOp = (JoinOperator)nd; List> bucketColsList = new ArrayList>(); + List> sortColsList = new ArrayList>(); byte pos = 0; for (Operator parentOp : joinOp.getParentOperators()) { if (!(parentOp instanceof ReduceSinkOperator)) { @@ -259,26 +290,24 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, ReduceSinkRule rsRule = new ReduceSinkRule(); rsRule.process(rsOp, stack, procCtx, nodeOutputs); } - bucketColsList.add(getOutputColNames(joinOp, rsOp, pos)); + bucketColsList.add(getOutputColNames(joinOp, rsOp.getOpTraits().getBucketColNames(), pos)); + sortColsList.add(getOutputColNames(joinOp, rsOp.getOpTraits().getSortCols(), pos)); pos++; } - joinOp.setOpTraits(new OpTraits(bucketColsList, -1)); + joinOp.setOpTraits(new OpTraits(bucketColsList, -1, bucketColsList)); return null; } - private List getOutputColNames(JoinOperator joinOp, - ReduceSinkOperator rs, byte pos) { - List> parentBucketColNames = - rs.getOpTraits().getBucketColNames(); - - if (parentBucketColNames != null) { + private List getOutputColNames(JoinOperator joinOp, List> parentColNames, + byte pos) { + if (parentColNames != null) { List bucketColNames = new ArrayList(); // guaranteed that there is only 1 list within this list because // a reduce sink always brings down the bucketing cols to a single list. 
// may not be true with correlation operators (mux-demux) - List colNames = parentBucketColNames.get(0); + List colNames = parentColNames.get(0); for (String colName : colNames) { for (ExprNodeDesc exprNode : joinOp.getConf().getExprs().get(pos)) { if (exprNode instanceof ExprNodeColumnDesc) { @@ -317,7 +346,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { - OpTraits opTraits = new OpTraits(null, -1); + OpTraits opTraits = new OpTraits(null, -1, null); @SuppressWarnings("unchecked") Operator operator = (Operator)nd; operator.setOpTraits(opTraits); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java new file mode 100644 index 0000000..e9e052f --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveDefaultRelMetadataProvider.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq; + +import com.google.common.collect.ImmutableList; + +import org.apache.hadoop.hive.ql.optimizer.optiq.stats.HiveRelMdDistinctRowCount; +import org.apache.hadoop.hive.ql.optimizer.optiq.stats.HiveRelMdRowCount; +import org.apache.hadoop.hive.ql.optimizer.optiq.stats.HiveRelMdSelectivity; +import org.apache.hadoop.hive.ql.optimizer.optiq.stats.HiveRelMdUniqueKeys; +import org.eigenbase.rel.metadata.ChainedRelMetadataProvider; +import org.eigenbase.rel.metadata.DefaultRelMetadataProvider; +import org.eigenbase.rel.metadata.RelMetadataProvider; + +public class HiveDefaultRelMetadataProvider { + private HiveDefaultRelMetadataProvider() { + } + + public static final RelMetadataProvider INSTANCE = ChainedRelMetadataProvider.of(ImmutableList + .of(HiveRelMdDistinctRowCount.SOURCE, + HiveRelMdSelectivity.SOURCE, + HiveRelMdRowCount.SOURCE, + HiveRelMdUniqueKeys.SOURCE, + new DefaultRelMetadataProvider())); +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java new file mode 100644 index 0000000..7c2b0cd --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveOptiqUtil.java @@ -0,0 +1,529 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Map.Entry; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.eigenbase.rel.RelFactories.ProjectFactory; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.SortRel; +import org.eigenbase.relopt.RelOptUtil; +import org.eigenbase.relopt.RelOptUtil.InputReferencedVisitor; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexNode; +import org.eigenbase.sql.SqlKind; +import org.eigenbase.sql.fun.SqlStdOperatorTable; +import org.eigenbase.sql.validate.SqlValidatorUtil; +import org.eigenbase.util.Pair; + +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Lists; + +/** + * Generic utility functions needed for Optiq based Hive CBO. + */ + +public class HiveOptiqUtil { + + /** + * Get list of virtual columns from the given list of projections. + *
+ * + * @param exps + * list of rex nodes representing projections + * @return List of Virtual Columns, will not be null. + */ + public static List getVirtualCols(List exps) { + List vCols = new ArrayList(); + + for (int i = 0; i < exps.size(); i++) { + if (!(exps.get(i) instanceof RexInputRef)) { + vCols.add(i); + } + } + + return vCols; + } + + public static boolean validateASTForCBO(ASTNode ast) { + String astTree = ast.toStringTree(); + String[] tokens = { "TOK_CHARSETLITERAL" }; + for (String token : tokens) { + if (astTree.contains(token)) { + return false; + } + } + return true; + } + + public static List getProjsFromBelowAsInputRef(final RelNode rel) { + List projectList = Lists.transform(rel.getRowType().getFieldList(), + new Function() { + @Override + public RexNode apply(RelDataTypeField field) { + return rel.getCluster().getRexBuilder().makeInputRef(field.getType(), field.getIndex()); + } + }); + return projectList; + } + + public static List translateBitSetToProjIndx(BitSet projBitSet) { + List projIndxLst = new ArrayList(); + + for (int i = 0; i < projBitSet.length(); i++) { + if (projBitSet.get(i)) { + projIndxLst.add(i); + } + } + + return projIndxLst; + } + + /** + * Push any equi join conditions that are not column references as Projections + * on top of the children. + * + * @param factory + * Project factory to use. + * @param inputRels + * inputs to a join + * @param leftJoinKeys + * expressions for LHS of join key + * @param rightJoinKeys + * expressions for RHS of join key + * @param systemColCount + * number of system columns, usually zero. These columns are + * projected at the leading edge of the output row. + * @param leftKeys + * on return this contains the join key positions from the new + * project rel on the LHS. + * @param rightKeys + * on return this contains the join key positions from the new + * project rel on the RHS. + * @return the join condition after the equi expressions pushed down. 
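+ *
+ * e.g. (illustrative): for ON (lower(t1.a) = t2.b) the pair is not a
+ * plain column-to-column equality, so lower(t1.a) and t2.b are appended
+ * as trailing projections on their respective inputs and the returned
+ * condition references those new columns as the join keys.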
+ */ + public static RexNode projectNonColumnEquiConditions(ProjectFactory factory, RelNode[] inputRels, + List leftJoinKeys, List rightJoinKeys, int systemColCount, + List leftKeys, List rightKeys) { + RelNode leftRel = inputRels[0]; + RelNode rightRel = inputRels[1]; + RexBuilder rexBuilder = leftRel.getCluster().getRexBuilder(); + RexNode outJoinCond = null; + + int origLeftInputSize = leftRel.getRowType().getFieldCount(); + int origRightInputSize = rightRel.getRowType().getFieldCount(); + + List newLeftFields = new ArrayList(); + List newLeftFieldNames = new ArrayList(); + + List newRightFields = new ArrayList(); + List newRightFieldNames = new ArrayList(); + int leftKeyCount = leftJoinKeys.size(); + int i; + + for (i = 0; i < origLeftInputSize; i++) { + final RelDataTypeField field = leftRel.getRowType().getFieldList().get(i); + newLeftFields.add(rexBuilder.makeInputRef(field.getType(), i)); + newLeftFieldNames.add(field.getName()); + } + + for (i = 0; i < origRightInputSize; i++) { + final RelDataTypeField field = rightRel.getRowType().getFieldList().get(i); + newRightFields.add(rexBuilder.makeInputRef(field.getType(), i)); + newRightFieldNames.add(field.getName()); + } + + int newKeyCount = 0; + List> origColEqConds = new ArrayList>(); + for (i = 0; i < leftKeyCount; i++) { + RexNode leftKey = leftJoinKeys.get(i); + RexNode rightKey = rightJoinKeys.get(i); + + if (leftKey instanceof RexInputRef && rightKey instanceof RexInputRef) { + origColEqConds.add(Pair.of(((RexInputRef) leftKey).getIndex(), + ((RexInputRef) rightKey).getIndex())); + } else { + newLeftFields.add(leftKey); + newLeftFieldNames.add(null); + newRightFields.add(rightKey); + newRightFieldNames.add(null); + newKeyCount++; + } + } + + for (i = 0; i < origColEqConds.size(); i++) { + Pair p = origColEqConds.get(i); + RexNode leftKey = leftJoinKeys.get(i); + RexNode rightKey = rightJoinKeys.get(i); + leftKeys.add(p.left); + rightKeys.add(p.right); + RexNode cond = rexBuilder.makeCall( + SqlStdOperatorTable.EQUALS, + rexBuilder.makeInputRef(leftKey.getType(), systemColCount + p.left), + rexBuilder.makeInputRef(rightKey.getType(), systemColCount + origLeftInputSize + + newKeyCount + p.right)); + if (outJoinCond == null) { + outJoinCond = cond; + } else { + outJoinCond = rexBuilder.makeCall(SqlStdOperatorTable.AND, outJoinCond, cond); + } + } + + if (newKeyCount == 0) { + return outJoinCond; + } + + int newLeftOffset = systemColCount + origLeftInputSize; + int newRightOffset = systemColCount + origLeftInputSize + origRightInputSize + newKeyCount; + for (i = 0; i < newKeyCount; i++) { + leftKeys.add(origLeftInputSize + i); + rightKeys.add(origRightInputSize + i); + RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, + rexBuilder.makeInputRef(newLeftFields.get(i).getType(), newLeftOffset + i), + rexBuilder.makeInputRef(newLeftFields.get(i).getType(), newRightOffset + i)); + if (outJoinCond == null) { + outJoinCond = cond; + } else { + outJoinCond = rexBuilder.makeCall(SqlStdOperatorTable.AND, outJoinCond, cond); + } + } + + // added project if need to produce new keys than the original input + // fields + if (newKeyCount > 0) { + leftRel = factory.createProject(leftRel, newLeftFields, + SqlValidatorUtil.uniquify(newLeftFieldNames)); + rightRel = factory.createProject(rightRel, newRightFields, + SqlValidatorUtil.uniquify(newRightFieldNames)); + } + + inputRels[0] = leftRel; + inputRels[1] = rightRel; + + return outJoinCond; + } + + /** + * JoinPredicateInfo represents Join condition; JoinPredicate Info uses + * 
JoinLeafPredicateInfo to represent individual conjunctive elements in the + * predicate.
+ * JoinPredicateInfo = JoinLeafPredicateInfo1 and JoinLeafPredicateInfo2...
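+ * e.g. (illustrative): ON (t1.a = t2.b AND t1.c < t2.d) decomposes into
+ * one equi element (t1.a = t2.b) and one non-equi element (t1.c < t2.d).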
+ *
+ * JoinPredicateInfo:
+ * 1. preserves the order of conjunctive elements for + * equi-join (equiJoinPredicateElements)
+ * 2. Stores set of projection indexes from left and right child which are part + * of equi join keys; the indexes are both in child and Join node schema.
+ * 3. Keeps a map of projection indexes that are part of join keys to list of + * conjuctive elements(JoinLeafPredicateInfo) that uses them. + * + */ + public static class JoinPredicateInfo { + private final ImmutableList nonEquiJoinPredicateElements; + private final ImmutableList equiJoinPredicateElements; + private final ImmutableSet projsFromLeftPartOfJoinKeysInChildSchema; + private final ImmutableSet projsFromRightPartOfJoinKeysInChildSchema; + private final ImmutableSet projsFromRightPartOfJoinKeysInJoinSchema; + private final ImmutableMap> mapOfProjIndxInJoinSchemaToLeafPInfo; + + public JoinPredicateInfo(List nonEquiJoinPredicateElements, + List equiJoinPredicateElements, + Set projsFromLeftPartOfJoinKeysInChildSchema, + Set projsFromRightPartOfJoinKeysInChildSchema, + Set projsFromRightPartOfJoinKeysInJoinSchema, + Map> mapOfProjIndxInJoinSchemaToLeafPInfo) { + this.nonEquiJoinPredicateElements = ImmutableList.copyOf(nonEquiJoinPredicateElements); + this.equiJoinPredicateElements = ImmutableList.copyOf(equiJoinPredicateElements); + this.projsFromLeftPartOfJoinKeysInChildSchema = ImmutableSet + .copyOf(projsFromLeftPartOfJoinKeysInChildSchema); + this.projsFromRightPartOfJoinKeysInChildSchema = ImmutableSet + .copyOf(projsFromRightPartOfJoinKeysInChildSchema); + this.projsFromRightPartOfJoinKeysInJoinSchema = ImmutableSet + .copyOf(projsFromRightPartOfJoinKeysInJoinSchema); + this.mapOfProjIndxInJoinSchemaToLeafPInfo = ImmutableMap + .copyOf(mapOfProjIndxInJoinSchemaToLeafPInfo); + } + + public List getNonEquiJoinPredicateElements() { + return nonEquiJoinPredicateElements; + } + + public List getEquiJoinPredicateElements() { + return equiJoinPredicateElements; + } + + public Set getProjsFromLeftPartOfJoinKeysInChildSchema() { + return projsFromLeftPartOfJoinKeysInChildSchema; + } + + public Set getProjsFromRightPartOfJoinKeysInChildSchema() { + return projsFromRightPartOfJoinKeysInChildSchema; + } + + /** + * NOTE: Join Schema = left Schema + (right Schema offset by + * left.fieldcount). Hence its ok to return projections from left in child + * schema. + */ + public Set getProjsFromLeftPartOfJoinKeysInJoinSchema() { + return projsFromLeftPartOfJoinKeysInChildSchema; + } + + public Set getProjsFromRightPartOfJoinKeysInJoinSchema() { + return projsFromRightPartOfJoinKeysInJoinSchema; + } + + public Map> getMapOfProjIndxToLeafPInfo() { + return mapOfProjIndxInJoinSchemaToLeafPInfo; + } + + public static JoinPredicateInfo constructJoinPredicateInfo(HiveJoinRel j) { + return constructJoinPredicateInfo(j, j.getCondition()); + } + + public static JoinPredicateInfo constructJoinPredicateInfo(HiveJoinRel j, RexNode predicate) { + JoinPredicateInfo jpi = null; + JoinLeafPredicateInfo jlpi = null; + List equiLPIList = new ArrayList(); + List nonEquiLPIList = new ArrayList(); + Set projsFromLeftPartOfJoinKeys = new HashSet(); + Set projsFromRightPartOfJoinKeys = new HashSet(); + Set projsFromRightPartOfJoinKeysInJoinSchema = new HashSet(); + Map> tmpMapOfProjIndxInJoinSchemaToLeafPInfo = new HashMap>(); + Map> mapOfProjIndxInJoinSchemaToLeafPInfo = new HashMap>(); + List tmpJLPILst = null; + int rightOffSet = j.getLeft().getRowType().getFieldCount(); + int projIndxInJoin; + List conjuctiveElements; + + // 1. Decompose Join condition to a number of leaf predicates + // (conjuctive elements) + conjuctiveElements = RelOptUtil.conjunctions(predicate); + + // 2. 
Walk through leaf predicates building up JoinLeafPredicateInfo + for (RexNode ce : conjuctiveElements) { + // 2.1 Construct JoinLeafPredicateInfo + jlpi = JoinLeafPredicateInfo.constructJoinLeafPredicateInfo(j, ce); + + // 2.2 Classify leaf predicate as Equi vs Non Equi + if (jlpi.m_comparisonType.equals(SqlKind.EQUALS)) { + equiLPIList.add(jlpi); + } else { + nonEquiLPIList.add(jlpi); + } + + // 2.3 Maintain join keys coming from left vs right (in child & + // Join Schema) + projsFromLeftPartOfJoinKeys.addAll(jlpi.getProjsFromLeftPartOfJoinKeysInChildSchema()); + projsFromRightPartOfJoinKeys.addAll(jlpi.getProjsFromRightPartOfJoinKeysInChildSchema()); + projsFromRightPartOfJoinKeysInJoinSchema.addAll(jlpi + .getProjsFromRightPartOfJoinKeysInJoinSchema()); + + // 2.4 Update Join Key to JoinLeafPredicateInfo map with keys + // from left + for (Integer projIndx : jlpi.getProjsFromLeftPartOfJoinKeysInChildSchema()) { + tmpJLPILst = tmpMapOfProjIndxInJoinSchemaToLeafPInfo.get(projIndx); + if (tmpJLPILst == null) + tmpJLPILst = new ArrayList(); + tmpJLPILst.add(jlpi); + tmpMapOfProjIndxInJoinSchemaToLeafPInfo.put(projIndx, tmpJLPILst); + } + + // 2.5 Update Join Key to JoinLeafPredicateInfo map with keys + // from right + for (Integer projIndx : jlpi.getProjsFromRightPartOfJoinKeysInChildSchema()) { + projIndxInJoin = projIndx + rightOffSet; + tmpJLPILst = tmpMapOfProjIndxInJoinSchemaToLeafPInfo.get(projIndxInJoin); + if (tmpJLPILst == null) + tmpJLPILst = new ArrayList(); + tmpJLPILst.add(jlpi); + tmpMapOfProjIndxInJoinSchemaToLeafPInfo.put(projIndxInJoin, tmpJLPILst); + } + + } + + // 3. Update Update Join Key to List to use + // ImmutableList + for (Entry> e : tmpMapOfProjIndxInJoinSchemaToLeafPInfo + .entrySet()) { + mapOfProjIndxInJoinSchemaToLeafPInfo.put(e.getKey(), ImmutableList.copyOf(e.getValue())); + } + + // 4. Construct JoinPredicateInfo + jpi = new JoinPredicateInfo(nonEquiLPIList, equiLPIList, projsFromLeftPartOfJoinKeys, + projsFromRightPartOfJoinKeys, projsFromRightPartOfJoinKeysInJoinSchema, + mapOfProjIndxInJoinSchemaToLeafPInfo); + return jpi; + } + } + + /** + * JoinLeafPredicateInfo represents leaf predicate in Join condition + * (conjuctive lement).
+ *
+ * JoinLeafPredicateInfo:
+ * 1. Stores list of expressions from left and right child which are part of + * equi join keys.
+ * 2. Stores set of projection indexes from left and right child which are part + * of equi join keys; the indexes are both in child and Join node schema.
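+ * e.g. (illustrative): for t1.x = t2.y where t1 has 5 fields, x is kept
+ * under its t1 index in both views, while y's index in the Join schema is
+ * its t2 index offset by 5.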
+ */ + public static class JoinLeafPredicateInfo { + private final SqlKind m_comparisonType; + private final ImmutableList m_joinKeyExprsFromLeft; + private final ImmutableList m_joinKeyExprsFromRight; + private final ImmutableSet m_projsFromLeftPartOfJoinKeysInChildSchema; + private final ImmutableSet m_projsFromRightPartOfJoinKeysInChildSchema; + private final ImmutableSet m_projsFromRightPartOfJoinKeysInJoinSchema; + + public JoinLeafPredicateInfo(SqlKind comparisonType, List joinKeyExprsFromLeft, + List joinKeyExprsFromRight, Set projsFromLeftPartOfJoinKeysInChildSchema, + Set projsFromRightPartOfJoinKeysInChildSchema, + Set projsFromRightPartOfJoinKeysInJoinSchema) { + m_comparisonType = comparisonType; + m_joinKeyExprsFromLeft = ImmutableList.copyOf(joinKeyExprsFromLeft); + m_joinKeyExprsFromRight = ImmutableList.copyOf(joinKeyExprsFromRight); + m_projsFromLeftPartOfJoinKeysInChildSchema = ImmutableSet + .copyOf(projsFromLeftPartOfJoinKeysInChildSchema); + m_projsFromRightPartOfJoinKeysInChildSchema = ImmutableSet + .copyOf(projsFromRightPartOfJoinKeysInChildSchema); + m_projsFromRightPartOfJoinKeysInJoinSchema = ImmutableSet + .copyOf(projsFromRightPartOfJoinKeysInJoinSchema); + } + + public List getJoinKeyExprsFromLeft() { + return m_joinKeyExprsFromLeft; + } + + public List getJoinKeyExprsFromRight() { + return m_joinKeyExprsFromRight; + } + + public Set getProjsFromLeftPartOfJoinKeysInChildSchema() { + return m_projsFromLeftPartOfJoinKeysInChildSchema; + } + + /** + * NOTE: Join Schema = left Schema + (right Schema offset by + * left.fieldcount). Hence its ok to return projections from left in child + * schema. + */ + public Set getProjsFromLeftPartOfJoinKeysInJoinSchema() { + return m_projsFromLeftPartOfJoinKeysInChildSchema; + } + + public Set getProjsFromRightPartOfJoinKeysInChildSchema() { + return m_projsFromRightPartOfJoinKeysInChildSchema; + } + + public Set getProjsFromRightPartOfJoinKeysInJoinSchema() { + return m_projsFromRightPartOfJoinKeysInJoinSchema; + } + + private static JoinLeafPredicateInfo constructJoinLeafPredicateInfo(HiveJoinRel j, RexNode pe) { + JoinLeafPredicateInfo jlpi = null; + List filterNulls = new ArrayList(); + List joinKeyExprsFromLeft = new ArrayList(); + List joinKeyExprsFromRight = new ArrayList(); + Set projsFromLeftPartOfJoinKeysInChildSchema = new HashSet(); + Set projsFromRightPartOfJoinKeysInChildSchema = new HashSet(); + Set projsFromRightPartOfJoinKeysInJoinSchema = new HashSet(); + int rightOffSet = j.getLeft().getRowType().getFieldCount(); + + // 1. Split leaf join predicate to expressions from left, right + RelOptUtil.splitJoinCondition(j.getSystemFieldList(), j.getLeft(), j.getRight(), pe, + joinKeyExprsFromLeft, joinKeyExprsFromRight, filterNulls, null); + + // 2. For left expressions, collect child projection indexes used + InputReferencedVisitor irvLeft = new InputReferencedVisitor(); + irvLeft.apply(joinKeyExprsFromLeft); + projsFromLeftPartOfJoinKeysInChildSchema.addAll(irvLeft.inputPosReferenced); + + // 3. For right expressions, collect child projection indexes used + InputReferencedVisitor irvRight = new InputReferencedVisitor(); + irvRight.apply(joinKeyExprsFromRight); + projsFromRightPartOfJoinKeysInChildSchema.addAll(irvRight.inputPosReferenced); + + // 3. Translate projection indexes from right to join schema, by adding + // offset. + for (Integer indx : projsFromRightPartOfJoinKeysInChildSchema) { + projsFromRightPartOfJoinKeysInJoinSchema.add(indx + rightOffSet); + } + + // 4. 
Construct JoinLeafPredicateInfo + jlpi = new JoinLeafPredicateInfo(pe.getKind(), joinKeyExprsFromLeft, joinKeyExprsFromRight, + projsFromLeftPartOfJoinKeysInChildSchema, projsFromRightPartOfJoinKeysInChildSchema, + projsFromRightPartOfJoinKeysInJoinSchema); + + return jlpi; + } + } + + public static boolean limitRelNode(RelNode rel) { + if ((rel instanceof SortRel) && ((SortRel) rel).getCollation().getFieldCollations().isEmpty()) + return true; + + return false; + } + + public static boolean orderRelNode(RelNode rel) { + if ((rel instanceof SortRel) && !((SortRel) rel).getCollation().getFieldCollations().isEmpty()) + return true; + + return false; + } + + /** + * Get top level select starting from root. Assumption here is root can only + * be SortRel & ProjectRel. Also the top project should be at most 2 levels + * below Sortrel; i.e SortRel(Limit)-SortRel(OB)-Select + * + * @param rootRel + * @return + */ + public static Pair getTopLevelSelect(final RelNode rootRel) { + RelNode tmpRel = rootRel; + RelNode parentOforiginalProjRel = rootRel; + HiveProjectRel originalProjRel = null; + + while (tmpRel != null) { + if (tmpRel instanceof HiveProjectRel) { + originalProjRel = (HiveProjectRel) tmpRel; + break; + } + parentOforiginalProjRel = tmpRel; + tmpRel = tmpRel.getInput(0); + } + + return (new Pair(parentOforiginalProjRel, originalProjRel)); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveTypeSystemImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveTypeSystemImpl.java new file mode 100644 index 0000000..1bc5a2c --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/HiveTypeSystemImpl.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq; + +import org.eigenbase.reltype.RelDataTypeSystemImpl; +import org.eigenbase.sql.type.SqlTypeName; + +public class HiveTypeSystemImpl extends RelDataTypeSystemImpl { + // TODO: This should come from type system; Currently there is no definition + // in type system for this. 
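+ // (note, illustrative) With these bounds an unqualified DECIMAL defaults
+ // to DECIMAL(10, 0) in Hive, and DECIMAL(38, s) is the widest permitted.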
+ private static final int MAX_DECIMAL_PRECISION = 38; + private static final int MAX_DECIMAL_SCALE = 38; + private static final int DEFAULT_DECIMAL_PRECISION = 10; + private static final int MAX_VARCHAR_PRECISION = 65535; + private static final int MAX_CHAR_PRECISION = 255; + private static final int MAX_BINARY_PRECISION = Integer.MAX_VALUE; + private static final int MAX_TIMESTAMP_PRECISION = 9; + + @Override + public int getMaxScale(SqlTypeName typeName) { + switch (typeName) { + case DECIMAL: + return getMaxNumericScale(); + case INTERVAL_DAY_TIME: + case INTERVAL_YEAR_MONTH: + return SqlTypeName.MAX_INTERVAL_FRACTIONAL_SECOND_PRECISION; + default: + return -1; + } + } + + @Override + public int getDefaultPrecision(SqlTypeName typeName) { + switch (typeName) { + // Hive will always require user to specify exact sizes for char, varchar; + // Binary doesn't need any sizes; Decimal has the default of 10. + case CHAR: + case VARCHAR: + case BINARY: + case VARBINARY: + case TIME: + case TIMESTAMP: + return getMaxPrecision(typeName); + case DECIMAL: + return DEFAULT_DECIMAL_PRECISION; + case INTERVAL_DAY_TIME: + case INTERVAL_YEAR_MONTH: + return SqlTypeName.DEFAULT_INTERVAL_START_PRECISION; + default: + return -1; + } + } + + @Override + public int getMaxPrecision(SqlTypeName typeName) { + switch (typeName) { + case DECIMAL: + return getMaxNumericPrecision(); + case VARCHAR: + return MAX_VARCHAR_PRECISION; + case CHAR: + return MAX_CHAR_PRECISION; + case VARBINARY: + case BINARY: + return MAX_BINARY_PRECISION; + case TIME: + case TIMESTAMP: + return MAX_TIMESTAMP_PRECISION; + case INTERVAL_DAY_TIME: + case INTERVAL_YEAR_MONTH: + return SqlTypeName.MAX_INTERVAL_START_PRECISION; + default: + return -1; + } + } + + @Override + public int getMaxNumericScale() { + return MAX_DECIMAL_SCALE; + } + + @Override + public int getMaxNumericPrecision() { + return MAX_DECIMAL_PRECISION; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/OptiqSemanticException.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/OptiqSemanticException.java new file mode 100644 index 0000000..d2b08fa --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/OptiqSemanticException.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.optimizer.optiq; + +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.parse.SemanticException; + +/** + * Exception from SemanticAnalyzer. 
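+ * (note) Raised when the Optiq-based planner cannot handle a construct,
+ * allowing the caller to fall back to the non-CBO planning path.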
+ */ + +public class OptiqSemanticException extends SemanticException { + + private static final long serialVersionUID = 1L; + + public OptiqSemanticException() { + super(); + } + + public OptiqSemanticException(String message) { + super(message); + } + + public OptiqSemanticException(Throwable cause) { + super(cause); + } + + public OptiqSemanticException(String message, Throwable cause) { + super(message, cause); + } + + public OptiqSemanticException(ErrorMsg errorMsg, String... msgArgs) { + super(errorMsg, msgArgs); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java new file mode 100644 index 0000000..080d27f --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java @@ -0,0 +1,355 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.ExprNodeConverter; +import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner; +import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; +import org.apache.hadoop.hive.ql.plan.ColStatistics; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.Statistics; +import org.apache.hadoop.hive.ql.stats.StatsUtils; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.TableAccessRel; +import org.eigenbase.relopt.RelOptAbstractTable; +import org.eigenbase.relopt.RelOptSchema; +import org.eigenbase.relopt.RelOptUtil.InputFinder; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.rex.RexNode; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableMap.Builder; + +public class RelOptHiveTable extends RelOptAbstractTable { + private final Table hiveTblMetadata; + private final String tblAlias; + private final ImmutableList hiveNonPartitionCols; + private final ImmutableMap hiveNonPartitionColsMap; + private final ImmutableMap hivePartitionColsMap; + private final int noOfProjs; + 
final HiveConf hiveConf; + + private double rowCount = -1; + Map hiveColStatsMap = new HashMap(); + PrunedPartitionList partitionList; + Map partitionCache; + AtomicInteger noColsMissingStats; + + protected static final Log LOG = LogFactory + .getLog(RelOptHiveTable.class + .getName()); + + public RelOptHiveTable(RelOptSchema optiqSchema, String qualifiedTblName, String tblAlias, RelDataType rowType, + Table hiveTblMetadata, List hiveNonPartitionCols, + List hivePartitionCols, HiveConf hconf, Map partitionCache, AtomicInteger noColsMissingStats) { + super(optiqSchema, qualifiedTblName, rowType); + this.hiveTblMetadata = hiveTblMetadata; + this.tblAlias = tblAlias; + this.hiveNonPartitionCols = ImmutableList.copyOf(hiveNonPartitionCols); + this.hiveNonPartitionColsMap = getColInfoMap(hiveNonPartitionCols, 0); + this.hivePartitionColsMap = getColInfoMap(hivePartitionCols, hiveNonPartitionColsMap.size()); + this.noOfProjs = hiveNonPartitionCols.size() + hivePartitionCols.size(); + this.hiveConf = hconf; + this.partitionCache = partitionCache; + this.noColsMissingStats = noColsMissingStats; + } + + private static ImmutableMap getColInfoMap(List hiveCols, + int startIndx) { + Builder bldr = ImmutableMap. builder(); + + int indx = startIndx; + for (ColumnInfo ci : hiveCols) { + bldr.put(indx, ci); + indx++; + } + + return bldr.build(); + } + + @Override + public boolean isKey(BitSet arg0) { + return false; + } + + @Override + public RelNode toRel(ToRelContext context) { + return new TableAccessRel(context.getCluster(), this); + } + + @Override + public T unwrap(Class arg0) { + return arg0.isInstance(this) ? arg0.cast(this) : null; + } + + @Override + public double getRowCount() { + if (rowCount == -1) { + if (null == partitionList) { + // we are here either unpartitioned table or partitioned table with no predicates + computePartitionList(hiveConf, null); + } + if (hiveTblMetadata.isPartitioned()) { + List rowCounts = StatsUtils.getBasicStatForPartitions( + hiveTblMetadata, partitionList.getNotDeniedPartns(), + StatsSetupConst.ROW_COUNT); + rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts); + + } else { + rowCount = StatsUtils.getNumRows(hiveTblMetadata); + } + } + + if (rowCount == -1) + noColsMissingStats.getAndIncrement(); + + return rowCount; + } + + public Table getHiveTableMD() { + return hiveTblMetadata; + } + + public String getTableAlias() { + // NOTE: Optiq considers tbls to be equal if their names are the same. Hence + // we need to provide Optiq the fully qualified table name (dbname.tblname) + // and not the user provided aliases. + // However in HIVE DB name can not appear in select list; in case of join + // where table names differ only in DB name, Hive would require user + // introducing explicit aliases for tbl. + if (tblAlias == null) + return hiveTblMetadata.getTableName(); + else + return tblAlias; + } + + private String getColNamesForLogging(Set colLst) { + StringBuffer sb = new StringBuffer(); + boolean firstEntry = true; + for (String colName : colLst) { + if (firstEntry) { + sb.append(colName); + firstEntry = false; + } else { + sb.append(", " + colName); + } + } + return sb.toString(); + } + + public void computePartitionList(HiveConf conf, RexNode pruneNode) { + + try { + if (!hiveTblMetadata.isPartitioned() || pruneNode == null || InputFinder.bits(pruneNode).length() == 0 ) { + // there is no predicate on partitioning column, we need all partitions in this case. 
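+ // (note) prune() with a null pruning expression is assumed to return
+ // the complete partition list; the same call also covers the
+ // unpartitioned-table case above.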
+ partitionList = PartitionPruner.prune(hiveTblMetadata, null, conf, getName(), partitionCache); + return; + } + + // We have valid pruning expressions, only retrieve qualifying partitions + ExprNodeDesc pruneExpr = pruneNode.accept(new ExprNodeConverter(getName(), getRowType(), true)); + + partitionList = PartitionPruner.prune(hiveTblMetadata, pruneExpr, conf, getName(), partitionCache); + } catch (HiveException he) { + throw new RuntimeException(he); + } + } + + private void updateColStats(Set projIndxLst) { + List nonPartColNamesThatRqrStats = new ArrayList(); + List nonPartColIndxsThatRqrStats = new ArrayList(); + List partColNamesThatRqrStats = new ArrayList(); + List partColIndxsThatRqrStats = new ArrayList(); + Set colNamesFailedStats = new HashSet(); + + // 1. Separate required columns to Non Partition and Partition Cols + ColumnInfo tmp; + for (Integer pi : projIndxLst) { + if (hiveColStatsMap.get(pi) == null) { + if ((tmp = hiveNonPartitionColsMap.get(pi)) != null) { + nonPartColNamesThatRqrStats.add(tmp.getInternalName()); + nonPartColIndxsThatRqrStats.add(pi); + } else if ((tmp = hivePartitionColsMap.get(pi)) != null) { + partColNamesThatRqrStats.add(tmp.getInternalName()); + partColIndxsThatRqrStats.add(pi); + } else { + noColsMissingStats.getAndIncrement(); + String logMsg = "Unable to find Column Index: " + pi + ", in " + + hiveTblMetadata.getCompleteName(); + LOG.error(logMsg); + throw new RuntimeException(logMsg); + } + } + } + + if (null == partitionList) { + // We could be here either because its an unpartitioned table or because + // there are no pruning predicates on a partitioned table. + computePartitionList(hiveConf, null); + } + + // 2. Obtain Col Stats for Non Partition Cols + if (nonPartColNamesThatRqrStats.size() > 0) { + List hiveColStats; + + if (!hiveTblMetadata.isPartitioned()) { + // 2.1 Handle the case for unpartitioned table. + hiveColStats = StatsUtils.getTableColumnStats(hiveTblMetadata, hiveNonPartitionCols, + nonPartColNamesThatRqrStats); + + // 2.1.1 Record Column Names that we needed stats for but couldn't + if (hiveColStats == null) { + colNamesFailedStats.addAll(nonPartColNamesThatRqrStats); + } else if (hiveColStats.size() != nonPartColNamesThatRqrStats.size()) { + Set setOfFiledCols = new HashSet(nonPartColNamesThatRqrStats); + + Set setOfObtainedColStats = new HashSet(); + for (ColStatistics cs : hiveColStats) { + setOfObtainedColStats.add(cs.getColumnName()); + } + setOfFiledCols.removeAll(setOfObtainedColStats); + + colNamesFailedStats.addAll(setOfFiledCols); + } + } else { + // 2.2 Obtain col stats for partitioned table. 
+ try { + if (partitionList.getNotDeniedPartns().isEmpty()) { + // no need to make a metastore call + rowCount = 0; + hiveColStats = new ArrayList(); + for (String c : nonPartColNamesThatRqrStats) { + // add empty stats object for each column + hiveColStats.add(new ColStatistics(hiveTblMetadata.getTableName(), c, null)); + } + colNamesFailedStats.clear(); + } else { + Statistics stats = StatsUtils.collectStatistics(hiveConf, partitionList, + hiveTblMetadata, hiveNonPartitionCols, nonPartColNamesThatRqrStats, + nonPartColNamesThatRqrStats, true, true); + rowCount = stats.getNumRows(); + hiveColStats = new ArrayList(); + for (String c : nonPartColNamesThatRqrStats) { + ColStatistics cs = stats.getColumnStatisticsFromColName(c); + if (cs != null) { + hiveColStats.add(cs); + } else { + colNamesFailedStats.add(c); + } + } + } + } catch (HiveException e) { + String logMsg = "Collecting stats failed."; + LOG.error(logMsg); + throw new RuntimeException(logMsg); + } + } + + if (hiveColStats != null && hiveColStats.size() == nonPartColNamesThatRqrStats.size()) { + for (int i = 0; i < hiveColStats.size(); i++) { + hiveColStatsMap.put(nonPartColIndxsThatRqrStats.get(i), hiveColStats.get(i)); + } + } + } + + // 3. Obtain Stats for Partition Cols + if (colNamesFailedStats.isEmpty() && !partColNamesThatRqrStats.isEmpty()) { + ColStatistics cStats = null; + for (int i = 0; i < partColNamesThatRqrStats.size(); i++) { + cStats = new ColStatistics(hiveTblMetadata.getTableName(), + partColNamesThatRqrStats.get(i), hivePartitionColsMap.get( + partColIndxsThatRqrStats.get(i)).getTypeName()); + cStats.setCountDistint(getDistinctCount(partitionList.getPartitions(),partColNamesThatRqrStats.get(i))); + hiveColStatsMap.put(partColIndxsThatRqrStats.get(i), cStats); + } + } + + // 4. Warn user if we could get stats for required columns + if (!colNamesFailedStats.isEmpty()) { + String logMsg = "No Stats for " + hiveTblMetadata.getCompleteName() + ", Columns: " + + getColNamesForLogging(colNamesFailedStats); + LOG.error(logMsg); + noColsMissingStats.getAndAdd(colNamesFailedStats.size()); + throw new RuntimeException(logMsg); + } + } + + private int getDistinctCount(Set partitions, String partColName) { + Set distinctVals = new HashSet(partitions.size()); + for (Partition partition : partitions) { + distinctVals.add(partition.getSpec().get(partColName)); + } + return distinctVals.size(); + } + + public List getColStat(List projIndxLst) { + ImmutableList.Builder colStatsBldr = ImmutableList. builder(); + + if (projIndxLst != null) { + updateColStats(new HashSet(projIndxLst)); + for (Integer i : projIndxLst) { + colStatsBldr.add(hiveColStatsMap.get(i)); + } + } else { + List pILst = new ArrayList(); + for (Integer i = 0; i < noOfProjs; i++) { + pILst.add(i); + } + updateColStats(new HashSet(pILst)); + for (Integer pi : pILst) { + colStatsBldr.add(hiveColStatsMap.get(pi)); + } + } + + return colStatsBldr.build(); + } + + /* + * use to check if a set of columns are all partition columns. + * true only if: + * - all columns in BitSet are partition + * columns. 
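+ * e.g. (illustrative): for a table partitioned by (ds, hr), a BitSet with
+ * only the ds and hr positions set returns true; any non-partition bit
+ * set makes it false.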
+ */ + public boolean containsPartitionColumnsOnly(BitSet cols) { + + for (int i = cols.nextSetBit(0); i >= 0; i++, i = cols.nextSetBit(i + 1)) { + if (!hivePartitionColsMap.containsKey(i)) { + return false; + } + } + return true; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java new file mode 100644 index 0000000..9b653d3 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java @@ -0,0 +1,19 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq; + + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel; +import org.eigenbase.rel.RelCollation; +import org.eigenbase.rel.RelCollationImpl; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelTraitSet; + +public class TraitsUtil { + public static RelTraitSet getSortTraitSet(RelOptCluster cluster, RelTraitSet traitSet, + RelCollation collation) { + return traitSet.plus(collation); + } + + public static RelTraitSet getDefaultTraitSet(RelOptCluster cluster) { + return cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java new file mode 100644 index 0000000..72fe5d6 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCost.java @@ -0,0 +1,212 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.cost; + +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptCostFactory; +import org.eigenbase.relopt.RelOptUtil; + +// TODO: This should inherit from VolcanoCost and should just override isLE method. 
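+// (note) Although cpu and io are carried along, this cost currently orders
+// plans by cardinality alone: isLe() and equals() compare only getRows(),
+// so plans with equal row counts tie regardless of cpu or io.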
+public class HiveCost implements RelOptCost { + // ~ Static fields/initializers --------------------------------------------- + + public static final HiveCost INFINITY = new HiveCost(Double.POSITIVE_INFINITY, + Double.POSITIVE_INFINITY, + Double.POSITIVE_INFINITY) { + @Override + public String toString() { + return "{inf}"; + } + }; + + public static final HiveCost HUGE = new HiveCost(Double.MAX_VALUE, Double.MAX_VALUE, + Double.MAX_VALUE) { + @Override + public String toString() { + return "{huge}"; + } + }; + + public static final HiveCost ZERO = new HiveCost(0.0, 0.0, 0.0) { + @Override + public String toString() { + return "{0}"; + } + }; + + public static final HiveCost TINY = new HiveCost(1.0, 1.0, 0.0) { + @Override + public String toString() { + return "{tiny}"; + } + }; + + public static final RelOptCostFactory FACTORY = new Factory(); + + // ~ Instance fields -------------------------------------------------------- + + final double cpu; + final double io; + final double rowCount; + + // ~ Constructors ----------------------------------------------------------- + + HiveCost(double rowCount, double cpu, double io) { + assert rowCount >= 0d; + assert cpu >= 0d; + assert io >= 0d; + this.rowCount = rowCount; + this.cpu = cpu; + this.io = io; + } + + // ~ Methods ---------------------------------------------------------------- + + public double getCpu() { + return cpu; + } + + public boolean isInfinite() { + return (this == INFINITY) || (this.rowCount == Double.POSITIVE_INFINITY) + || (this.cpu == Double.POSITIVE_INFINITY) || (this.io == Double.POSITIVE_INFINITY); + } + + public double getIo() { + return io; + } + + // TODO: If two cost is equal, could we do any better than comparing + // cardinality (may be some other heuristics to break the tie) + public boolean isLe(RelOptCost other) { + return this == other || this.rowCount <= other.getRows(); + /* + * if (((this.dCpu + this.dIo) < (other.getCpu() + other.getIo())) || + * ((this.dCpu + this.dIo) == (other.getCpu() + other.getIo()) && this.dRows + * <= other.getRows())) { return true; } else { return false; } + */ + } + + public boolean isLt(RelOptCost other) { + return this.rowCount < other.getRows(); + /* + * return isLe(other) && !equals(other); + */ + } + + public double getRows() { + return rowCount; + } + + public boolean equals(RelOptCost other) { + return (this == other) || ((this.rowCount) == (other.getRows())); + + /* + * //TODO: should we consider cardinality as well? return (this == other) || + * ((this.dCpu + this.dIo) == (other.getCpu() + other.getIo())); + */ + } + + public boolean isEqWithEpsilon(RelOptCost other) { + return (this == other) || (Math.abs((this.rowCount) - (other.getRows())) < RelOptUtil.EPSILON); + // Turn this one once we do the Algorithm selection in CBO + /* + * return (this == other) || (Math.abs((this.dCpu + this.dIo) - + * (other.getCpu() + other.getIo())) < RelOptUtil.EPSILON); + */ + } + + public RelOptCost minus(RelOptCost other) { + if (this == INFINITY) { + return this; + } + + return new HiveCost(this.rowCount - other.getRows(), this.cpu - other.getCpu(), this.io + - other.getIo()); + } + + public RelOptCost multiplyBy(double factor) { + if (this == INFINITY) { + return this; + } + return new HiveCost(rowCount * factor, cpu * factor, io * factor); + } + + public double divideBy(RelOptCost cost) { + // Compute the geometric average of the ratios of all of the factors + // which are non-zero and finite. 
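+ // e.g. (illustrative): {8 rows, 2 cpu, 0 io}.divideBy({2 rows, 2 cpu, 0 io})
+ // keeps the ratios 4.0 (rows) and 1.0 (cpu), skips io because both sides
+ // are zero, and returns Math.pow(4.0 * 1.0, 1.0 / 2) = 2.0.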
+ double d = 1; + double n = 0; + if ((this.rowCount != 0) && !Double.isInfinite(this.rowCount) && (cost.getRows() != 0) + && !Double.isInfinite(cost.getRows())) { + d *= this.rowCount / cost.getRows(); + ++n; + } + if ((this.cpu != 0) && !Double.isInfinite(this.cpu) && (cost.getCpu() != 0) + && !Double.isInfinite(cost.getCpu())) { + d *= this.cpu / cost.getCpu(); + ++n; + } + if ((this.io != 0) && !Double.isInfinite(this.io) && (cost.getIo() != 0) + && !Double.isInfinite(cost.getIo())) { + d *= this.io / cost.getIo(); + ++n; + } + if (n == 0) { + return 1.0; + } + return Math.pow(d, 1 / n); + } + + public RelOptCost plus(RelOptCost other) { + if ((this == INFINITY) || (other.isInfinite())) { + return INFINITY; + } + return new HiveCost(this.rowCount + other.getRows(), this.cpu + other.getCpu(), this.io + + other.getIo()); + } + + @Override + public String toString() { + return "{" + rowCount + " rows, " + cpu + " cpu, " + io + " io}"; + } + + private static class Factory implements RelOptCostFactory { + private Factory() { + } + + public RelOptCost makeCost(double rowCount, double cpu, double io) { + return new HiveCost(rowCount, cpu, io); + } + + public RelOptCost makeHugeCost() { + return HUGE; + } + + public HiveCost makeInfiniteCost() { + return INFINITY; + } + + public HiveCost makeTinyCost() { + return TINY; + } + + public HiveCost makeZeroCost() { + return ZERO; + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java new file mode 100644 index 0000000..7436f12 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveCostUtil.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.cost; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.eigenbase.relopt.RelOptCost; + +// Use this once we have Join Algorithm selection +public class HiveCostUtil { + private static final double cpuCostInNanoSec = 1.0; + private static final double netCostInNanoSec = 150 * cpuCostInNanoSec; + private static final double localFSWriteCostInNanoSec = 4 * netCostInNanoSec; + private static final double localFSReadCostInNanoSec = 4 * netCostInNanoSec; + private static final double hDFSWriteCostInNanoSec = 10 * localFSWriteCostInNanoSec; + @SuppressWarnings("unused") +//Use this once we have Join Algorithm selection + private static final double hDFSReadCostInNanoSec = 1.5 * localFSReadCostInNanoSec; + + public static RelOptCost computCardinalityBasedCost(HiveRel hr) { + return new HiveCost(hr.getRows(), 0, 0); + } + + public static HiveCost computeCost(HiveTableScanRel t) { + double cardinality = t.getRows(); + return new HiveCost(cardinality, 0, hDFSWriteCostInNanoSec * cardinality * 0); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java new file mode 100644 index 0000000..15596bc --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java @@ -0,0 +1,31 @@ +package org.apache.hadoop.hive.ql.optimizer.optiq.cost; + +import org.eigenbase.rel.RelCollationTraitDef; +import org.eigenbase.relopt.ConventionTraitDef; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.volcano.VolcanoPlanner; + +/** + * Refinement of {@link org.eigenbase.relopt.volcano.VolcanoPlanner} for Hive. + * + *
+ * It uses {@link org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost} as + * its cost model. + */ +public class HiveVolcanoPlanner extends VolcanoPlanner { + private static final boolean ENABLE_COLLATION_TRAIT = true; + + /** Creates a HiveVolcanoPlanner. */ + public HiveVolcanoPlanner() { + super(HiveCost.FACTORY, null); + } + + public static RelOptPlanner createPlanner() { + final VolcanoPlanner planner = new HiveVolcanoPlanner(); + planner.addRelTraitDef(ConventionTraitDef.INSTANCE); + if (ENABLE_COLLATION_TRAIT) { + planner.addRelTraitDef(RelCollationTraitDef.INSTANCE); + } + return planner; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java new file mode 100644 index 0000000..fc19895 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveAggregateRel.java @@ -0,0 +1,84 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.BitSet; +import java.util.List; + +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.eigenbase.rel.AggregateCall; +import org.eigenbase.rel.AggregateRelBase; +import org.eigenbase.rel.InvalidRelException; +import org.eigenbase.rel.RelFactories.AggregateFactory; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.metadata.RelMetadataQuery; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelTraitSet; + +public class HiveAggregateRel extends AggregateRelBase implements HiveRel { + + public static final HiveAggRelFactory HIVE_AGGR_REL_FACTORY = new HiveAggRelFactory(); + + public HiveAggregateRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, + BitSet groupSet, List aggCalls) throws InvalidRelException { + super(cluster, TraitsUtil.getDefaultTraitSet(cluster), child, groupSet, aggCalls); + } + + @Override + public AggregateRelBase copy(RelTraitSet traitSet, RelNode input, BitSet groupSet, + List aggCalls) { + try { + return new HiveAggregateRel(getCluster(), traitSet, input, groupSet, aggCalls); + } catch (InvalidRelException e) { + // Semantic error not possible. Must be a bug. Convert to + // internal error. 
+ throw new AssertionError(e); + } + } + + @Override + public void implement(Implementor implementor) { + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner) { + return HiveCost.FACTORY.makeZeroCost(); + } + + @Override + public double getRows() { + return RelMetadataQuery.getDistinctRowCount(this, groupSet, getCluster().getRexBuilder() + .makeLiteral(true)); + } + + private static class HiveAggRelFactory implements AggregateFactory { + + @Override + public RelNode createAggregate(RelNode child, BitSet groupSet, + List aggCalls) { + try { + return new HiveAggregateRel(child.getCluster(), child.getTraitSet(), child, groupSet, aggCalls); + } catch (InvalidRelException e) { + throw new RuntimeException(e); + } + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java new file mode 100644 index 0000000..8b85046 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveFilterRel.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.RelFactories.FilterFactory; +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.rex.RexNode; + +public class HiveFilterRel extends FilterRelBase implements HiveRel { + + public static final FilterFactory DEFAULT_FILTER_FACTORY = new HiveFilterFactoryImpl(); + + public HiveFilterRel(RelOptCluster cluster, RelTraitSet traits, RelNode child, RexNode condition) { + super(cluster, TraitsUtil.getDefaultTraitSet(cluster), child, condition); + } + + @Override + public FilterRelBase copy(RelTraitSet traitSet, RelNode input, RexNode condition) { + assert traitSet.containsIfApplicable(HiveRel.CONVENTION); + return new HiveFilterRel(getCluster(), traitSet, input, getCondition()); + } + + @Override + public void implement(Implementor implementor) { + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner) { + return HiveCost.FACTORY.makeZeroCost(); + } + + /** + * Implementation of {@link FilterFactory} that returns + * {@link org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel} + * . 
+ */ + private static class HiveFilterFactoryImpl implements FilterFactory { + @Override + public RelNode createFilter(RelNode child, RexNode condition) { + RelOptCluster cluster = child.getCluster(); + HiveFilterRel filter = new HiveFilterRel(cluster, TraitsUtil.getDefaultTraitSet(cluster), child, condition); + return filter; + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java new file mode 100644 index 0000000..3d6aa84 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveJoinRel.java @@ -0,0 +1,157 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.Collections; +import java.util.Set; + +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.eigenbase.rel.InvalidRelException; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.RelFactories.JoinFactory; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.metadata.RelMetadataQuery; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexNode; + +//TODO: Should we convert MultiJoin to be a child of HiveJoinRelBase +public class HiveJoinRel extends JoinRelBase implements HiveRel { + // NOTE: COMMON_JOIN & SMB_JOIN are Sort Merge Join (in case of COMMON_JOIN + // each parallel computation handles multiple splits where as in case of SMB + // each parallel computation handles one bucket). 
+  // MAP_JOIN and BUCKET_JOIN are hash joins, where MAP_JOIN keeps the whole
+  // data set of the non-streaming tables in memory, whereas BUCKET_JOIN
+  // keeps only the buckets currently being joined in memory.
+  public enum JoinAlgorithm {
+    NONE, COMMON_JOIN, MAP_JOIN, BUCKET_JOIN, SMB_JOIN
+  }
+
+  public enum MapJoinStreamingRelation {
+    NONE, LEFT_RELATION, RIGHT_RELATION
+  }
+
+  public static final JoinFactory HIVE_JOIN_FACTORY = new HiveJoinFactoryImpl();
+
+  private final boolean leftSemiJoin;
+  private final JoinAlgorithm joinAlgorithm;
+  // This will be used once we do Join Algorithm selection
+  @SuppressWarnings("unused")
+  private final MapJoinStreamingRelation mapJoinStreamingSide = MapJoinStreamingRelation.NONE;
+
+  public static HiveJoinRel getJoin(RelOptCluster cluster, RelNode left, RelNode right,
+      RexNode condition, JoinRelType joinType, boolean leftSemiJoin) {
+    try {
+      Set<String> variablesStopped = Collections.emptySet();
+      return new HiveJoinRel(cluster, null, left, right, condition, joinType, variablesStopped,
+          JoinAlgorithm.NONE, null, leftSemiJoin);
+    } catch (InvalidRelException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  protected HiveJoinRel(RelOptCluster cluster, RelTraitSet traits, RelNode left, RelNode right,
+      RexNode condition, JoinRelType joinType, Set<String> variablesStopped,
+      JoinAlgorithm joinAlgo, MapJoinStreamingRelation streamingSideForMapJoin, boolean leftSemiJoin)
+      throws InvalidRelException {
+    super(cluster, TraitsUtil.getDefaultTraitSet(cluster), left, right, condition, joinType,
+        variablesStopped);
+    this.joinAlgorithm = joinAlgo;
+    this.leftSemiJoin = leftSemiJoin;
+  }
+
+  @Override
+  public void implement(Implementor implementor) {
+  }
+
+  @Override
+  public final HiveJoinRel copy(RelTraitSet traitSet, RexNode conditionExpr, RelNode left,
+      RelNode right, JoinRelType joinType, boolean semiJoinDone) {
+    try {
+      Set<String> variablesStopped = Collections.emptySet();
+      return new HiveJoinRel(getCluster(), traitSet, left, right, conditionExpr, joinType,
+          variablesStopped, JoinAlgorithm.NONE, null, leftSemiJoin);
+    } catch (InvalidRelException e) {
+      // Semantic error not possible. Must be a bug. Convert to
+      // internal error.
+      throw new AssertionError(e);
+    }
+  }
+
+  public JoinAlgorithm getJoinAlgorithm() {
+    return joinAlgorithm;
+  }
+
+  public boolean isLeftSemiJoin() {
+    return leftSemiJoin;
+  }
+
+  /**
+   * Model cost of join as size of Inputs.
+   */
+  @Override
+  public RelOptCost computeSelfCost(RelOptPlanner planner) {
+    double leftRCount = RelMetadataQuery.getRowCount(getLeft());
+    double rightRCount = RelMetadataQuery.getRowCount(getRight());
+    return HiveCost.FACTORY.makeCost(leftRCount + rightRCount, 0.0, 0.0);
+  }
+
+  /**
+   * @return row type representing only the left join input (for the left
+   *         semi-join case)
+   */
+  @Override
+  public RelDataType deriveRowType() {
+    if (leftSemiJoin) {
+      return deriveJoinRowType(left.getRowType(), null, JoinRelType.INNER,
+          getCluster().getTypeFactory(), null,
+          Collections.<RelDataTypeField> emptyList());
+    }
+    return super.deriveRowType();
+  }
+
+  private static class HiveJoinFactoryImpl implements JoinFactory {
+    /**
+     * Creates a join.
+ * + * @param left + * Left input + * @param right + * Right input + * @param condition + * Join condition + * @param joinType + * Join type + * @param variablesStopped + * Set of names of variables which are set by the LHS and used by + * the RHS and are not available to nodes above this JoinRel in the + * tree + * @param semiJoinDone + * Whether this join has been translated to a semi-join + */ + @Override + public RelNode createJoin(RelNode left, RelNode right, RexNode condition, JoinRelType joinType, + Set variablesStopped, boolean semiJoinDone) { + return getJoin(left.getCluster(), left, right, condition, joinType, false); + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java new file mode 100644 index 0000000..f8755d0 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveLimitRel.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
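The JoinAlgorithm enum and the NONE placeholder above anticipate the join algorithm selection that several comments defer. A purely hypothetical sketch of what such a selector might look like; the class, threshold, and inputs are invented for illustration and are not part of the patch:

import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel.JoinAlgorithm;

public class JoinAlgorithmSelectorSketch {
  private final double smallTableByteThreshold;  // assumed config value

  public JoinAlgorithmSelectorSketch(double smallTableByteThreshold) {
    this.smallTableByteThreshold = smallTableByteThreshold;
  }

  // Prefer a hash join when one side fits in memory; fall back to
  // sort-merge variants otherwise.
  public JoinAlgorithm select(double leftBytes, double rightBytes,
      boolean bothSidesBucketed) {
    if (Math.min(leftBytes, rightBytes) <= smallTableByteThreshold) {
      return JoinAlgorithm.MAP_JOIN;
    }
    return bothSidesBucketed ? JoinAlgorithm.SMB_JOIN : JoinAlgorithm.COMMON_JOIN;
  }
}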
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.List; + +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.SingleRel; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.rex.RexNode; + +public class HiveLimitRel extends SingleRel implements HiveRel { + private final RexNode offset; + private final RexNode fetch; + + HiveLimitRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, RexNode offset, + RexNode fetch) { + super(cluster, TraitsUtil.getDefaultTraitSet(cluster), child); + this.offset = offset; + this.fetch = fetch; + assert getConvention() instanceof HiveRel; + assert getConvention() == child.getConvention(); + } + + @Override + public HiveLimitRel copy(RelTraitSet traitSet, List newInputs) { + return new HiveLimitRel(getCluster(), traitSet, sole(newInputs), offset, fetch); + } + + public void implement(Implementor implementor) { + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner) { + return HiveCost.FACTORY.makeZeroCost(); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java new file mode 100644 index 0000000..7b434ea --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveProjectRel.java @@ -0,0 +1,204 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import com.google.common.collect.ImmutableList; + +import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException; +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.eigenbase.rel.ProjectRelBase; +import org.eigenbase.rel.RelCollation; +import org.eigenbase.rel.RelFactories.ProjectFactory; +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; +import org.eigenbase.util.Util; +import org.eigenbase.util.mapping.Mapping; +import org.eigenbase.util.mapping.MappingType; + +public class HiveProjectRel extends ProjectRelBase implements HiveRel { + + public static final ProjectFactory DEFAULT_PROJECT_FACTORY = new HiveProjectFactoryImpl(); + + private final List virtualCols; + + /** + * Creates a HiveProjectRel. + * + * @param cluster + * Cluster this relational expression belongs to + * @param child + * input relational expression + * @param exps + * List of expressions for the input columns + * @param rowType + * output row type + * @param flags + * values as in {@link ProjectRelBase.Flags} + */ + public HiveProjectRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child, + List exps, RelDataType rowType, int flags) { + super(cluster, traitSet, child, exps, rowType, flags); + virtualCols = ImmutableList.copyOf(HiveOptiqUtil.getVirtualCols(exps)); + } + + /** + * Creates a HiveProjectRel with no sort keys. + * + * @param child + * input relational expression + * @param exps + * set of expressions for the input columns + * @param fieldNames + * aliases of the expressions + */ + public static HiveProjectRel create(RelNode child, List exps, + List fieldNames) throws OptiqSemanticException{ + RelOptCluster cluster = child.getCluster(); + + // 1 Ensure columnNames are unique - OPTIQ-411 + if (fieldNames != null && !Util.isDistinct(fieldNames)) { + String msg = "Select list contains multiple expressions with the same name." + fieldNames; + throw new OptiqSemanticException(msg); + } + RelDataType rowType = RexUtil.createStructType(cluster.getTypeFactory(), exps, fieldNames); + return create(cluster, child, exps, rowType, Collections. emptyList()); + } + + /** + * Creates a HiveProjectRel. + */ + public static HiveProjectRel create(RelOptCluster cluster, RelNode child, List exps, + RelDataType rowType, final List collationList) { + RelTraitSet traitSet = TraitsUtil.getDefaultTraitSet(cluster); + return new HiveProjectRel(cluster, traitSet, child, exps, rowType, Flags.BOXED); + } + + /** + * Creates a HiveProjectRel. + */ + public static HiveProjectRel create(RelOptCluster cluster, RelNode child, List exps, + RelDataType rowType, RelTraitSet traitSet, final List collationList) { + return new HiveProjectRel(cluster, traitSet, child, exps, rowType, Flags.BOXED); + } + + /** + * Creates a relational expression which projects the output fields of a + * relational expression according to a partial mapping. + * + *
+ * <p>
+ * A partial mapping is weaker than a permutation: every target has one + * source, but a source may have 0, 1 or more than one targets. Usually the + * result will have fewer fields than the source, unless some source fields + * are projected multiple times. + * + *
+ * <p>
+ * This method could optimize the result as {@link #permute} does, but does + * not at present. + * + * @param rel + * Relational expression + * @param mapping + * Mapping from source fields to target fields. The mapping type must + * obey the constraints {@link MappingType#isMandatorySource()} and + * {@link MappingType#isSingleSource()}, as does + * {@link MappingType#INVERSE_FUNCTION}. + * @param fieldNames + * Field names; if null, or if a particular entry is null, the name + * of the permuted field is used + * @return relational expression which projects a subset of the input fields + * @throws OptiqSemanticException + */ + public static RelNode projectMapping(RelNode rel, Mapping mapping, List fieldNames) throws OptiqSemanticException { + assert mapping.getMappingType().isSingleSource(); + assert mapping.getMappingType().isMandatorySource(); + + if (mapping.isIdentity()) { + return rel; + } + + final List outputNameList = new ArrayList(); + final List outputProjList = new ArrayList(); + final List fields = rel.getRowType().getFieldList(); + final RexBuilder rexBuilder = rel.getCluster().getRexBuilder(); + + for (int i = 0; i < mapping.getTargetCount(); i++) { + int source = mapping.getSource(i); + final RelDataTypeField sourceField = fields.get(source); + outputNameList + .add(((fieldNames == null) || (fieldNames.size() <= i) || (fieldNames.get(i) == null)) ? sourceField + .getName() : fieldNames.get(i)); + outputProjList.add(rexBuilder.makeInputRef(rel, source)); + } + + return create(rel, outputProjList, outputNameList); + } + + @Override + public ProjectRelBase copy(RelTraitSet traitSet, RelNode input, List exps, + RelDataType rowType) { + assert traitSet.containsIfApplicable(HiveRel.CONVENTION); + return new HiveProjectRel(getCluster(), traitSet, input, exps, rowType, getFlags()); + } + + @Override + public RelOptCost computeSelfCost(RelOptPlanner planner) { + return HiveCost.FACTORY.makeZeroCost(); + } + + @Override + public void implement(Implementor implementor) { + } + + public List getVirtualCols() { + return virtualCols; + } + + /** + * Implementation of {@link ProjectFactory} that returns + * {@link org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel} + * . + */ + private static class HiveProjectFactoryImpl implements ProjectFactory { + + @Override + public RelNode createProject(RelNode child, + List childExprs, List fieldNames) { + RelOptCluster cluster = child.getCluster(); + RelDataType rowType = RexUtil.createStructType(cluster.getTypeFactory(), childExprs, fieldNames); + RelNode project = HiveProjectRel.create(cluster, child, + childExprs, rowType, + child.getTraitSet(), Collections. emptyList()); + + return project; + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java new file mode 100644 index 0000000..4738c4a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveRel.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
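A worked example of what projectMapping computes: each target position pulls from exactly one source, so sources may be dropped or projected more than once. A dependency-free sketch, with arrays standing in for RexInputRefs:

public class ProjectMappingSketch {
  public static void main(String[] args) {
    String[] inputFields = {"a", "b", "c"};
    int[] targetToSource = {2, 0, 0};       // project (c, a, a)

    String[] output = new String[targetToSource.length];
    for (int i = 0; i < targetToSource.length; i++) {
      output[i] = inputFields[targetToSource[i]];
    }
    // Prints [c, a, a]: "b" is dropped, "a" is projected twice.
    System.out.println(java.util.Arrays.toString(output));
  }
}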
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.Convention; + +public interface HiveRel extends RelNode { + void implement(Implementor implementor); + + /** Calling convention for relational operations that occur in Hive. */ + final Convention CONVENTION = new Convention.Impl("HIVE", HiveRel.class); + + class Implementor { + + public void visitChild(int ordinal, RelNode input) { + assert ordinal == 0; + ((HiveRel) input).implement(this); + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java new file mode 100644 index 0000000..f85363d --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveSortRel.java @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators; + +import java.util.Map; + +import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil; +import org.eigenbase.rel.RelCollation; +import org.eigenbase.rel.RelFactories; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.SortRel; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.rex.RexNode; + +import com.google.common.collect.ImmutableMap; + +public class HiveSortRel extends SortRel implements HiveRel { + + public static final HiveSortRelFactory HIVE_SORT_REL_FACTORY = new HiveSortRelFactory(); + + // NOTE: this is to work around Hive Optiq Limitations w.r.t OB. + // 1. Optiq can not accept expressions in OB; instead it needs to be expressed + // as VC in input Select. + // 2. Hive can not preserve ordering through select boundaries. + // 3. This map is used for outermost OB to migrate the VC corresponding OB + // expressions from input select. + // 4. 
+  //    This is used by ASTConverter after we are done with Optiq Planning.
+  private ImmutableMap<Integer, RexNode> mapOfInputRefToRexCall;
+
+  public HiveSortRel(RelOptCluster cluster, RelTraitSet traitSet, RelNode child,
+      RelCollation collation, RexNode offset, RexNode fetch) {
+    super(cluster, TraitsUtil.getSortTraitSet(cluster, traitSet, collation), child, collation,
+        offset, fetch);
+  }
+
+  @Override
+  public HiveSortRel copy(RelTraitSet traitSet, RelNode newInput, RelCollation newCollation,
+      RexNode offset, RexNode fetch) {
+    // TODO: can we blindly copy the sort trait? What if the inputs changed
+    // and we are now sorting by different cols?
+    RelCollation canonizedCollation = traitSet.canonize(newCollation);
+    return new HiveSortRel(getCluster(), traitSet, newInput, canonizedCollation, offset, fetch);
+  }
+
+  public RexNode getFetchExpr() {
+    return fetch;
+  }
+
+  public void setInputRefToCallMap(ImmutableMap<Integer, RexNode> refToCall) {
+    this.mapOfInputRefToRexCall = refToCall;
+  }
+
+  public Map<Integer, RexNode> getInputRefToCallMap() {
+    return this.mapOfInputRefToRexCall;
+  }
+
+  @Override
+  public void implement(Implementor implementor) {
+  }
+
+  private static class HiveSortRelFactory implements RelFactories.SortFactory {
+
+    @Override
+    public RelNode createSort(RelTraitSet traits, RelNode child, RelCollation collation,
+        RexNode offset, RexNode fetch) {
+      return new HiveSortRel(child.getCluster(), traits, child, collation, offset, fetch);
+    }
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java
new file mode 100644
index 0000000..bd66459
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveTableScanRel.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost;
+import org.apache.hadoop.hive.ql.plan.ColStatistics;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.rel.TableAccessRelBase;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelOptCost;
+import org.eigenbase.relopt.RelOptPlanner;
+import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.reltype.RelDataType;
+
+
+/**
+ * Relational expression representing a scan of a Hive table.
+ *
+ * <p>
+ * Additional operations, such as filters and projections, may be applied on
+ * top of the scan by the planner rules that follow.
+ *
+ * </p>
+ */
+public class HiveTableScanRel extends TableAccessRelBase implements HiveRel {
+
+  /**
+   * Creates a HiveTableScan.
+   *
+   * @param cluster
+   *          Cluster
+   * @param traitSet
+   *          Traits
+   * @param table
+   *          Hive table
+   * @param rowtype
+   *          Row type
+   */
+  public HiveTableScanRel(RelOptCluster cluster, RelTraitSet traitSet, RelOptHiveTable table,
+      RelDataType rowtype) {
+    super(cluster, TraitsUtil.getDefaultTraitSet(cluster), table);
+    assert getConvention() == HiveRel.CONVENTION;
+  }
+
+  @Override
+  public RelNode copy(RelTraitSet traitSet, List<RelNode> inputs) {
+    assert inputs.isEmpty();
+    return this;
+  }
+
+  @Override
+  public RelOptCost computeSelfCost(RelOptPlanner planner) {
+    return HiveCost.FACTORY.makeZeroCost();
+  }
+
+  @Override
+  public void register(RelOptPlanner planner) {
+
+  }
+
+  @Override
+  public void implement(Implementor implementor) {
+
+  }
+
+  @Override
+  public double getRows() {
+    return ((RelOptHiveTable) table).getRowCount();
+  }
+
+  public List<ColStatistics> getColStat(List<Integer> projIndxLst) {
+    return ((RelOptHiveTable) table).getColStat(projIndxLst);
+  }
+}
\ No newline at end of file
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveUnionRel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveUnionRel.java
new file mode 100644
index 0000000..d34fe95
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/reloperators/HiveUnionRel.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.optiq.reloperators;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel.Implementor;
+import org.eigenbase.rel.RelFactories;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.rel.SetOpRel;
+import org.eigenbase.rel.UnionRelBase;
+import org.eigenbase.relopt.RelOptCluster;
+import org.eigenbase.relopt.RelTraitSet;
+import org.eigenbase.sql.SqlKind;
+
+public class HiveUnionRel extends UnionRelBase {
+
+  public static final HiveUnionRelFactory UNION_REL_FACTORY = new HiveUnionRelFactory();
+
+  public HiveUnionRel(RelOptCluster cluster, RelTraitSet traits, List<RelNode> inputs) {
+    super(cluster, traits, inputs, true);
+  }
+
+  @Override
+  public SetOpRel copy(RelTraitSet traitSet, List<RelNode> inputs, boolean all) {
+    return new HiveUnionRel(this.getCluster(), traitSet, inputs);
+  }
+
+  public void implement(Implementor implementor) {
+  }
+
+  private static class HiveUnionRelFactory implements RelFactories.SetOpFactory {
+
+    @Override
+    public RelNode createSetOp(SqlKind kind, List<RelNode> inputs, boolean all) {
+      if (kind != SqlKind.UNION) {
+        throw new IllegalStateException("Expected to get Set operator of type Union.
Found : " + kind); + } + return new HiveUnionRel(inputs.get(0).getCluster(), inputs.get(0).getTraitSet(), inputs); + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java new file mode 100644 index 0000000..d6581e6 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HiveMergeProjectRule.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.rules; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; +import org.eigenbase.rel.rules.MergeProjectRule; + +//Currently not used, turn this on later +public class HiveMergeProjectRule extends MergeProjectRule { + public static final HiveMergeProjectRule INSTANCE = new HiveMergeProjectRule(); + + public HiveMergeProjectRule() { + super(true, HiveProjectRel.DEFAULT_PROJECT_FACTORY); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePartitionPrunerRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePartitionPrunerRule.java new file mode 100644 index 0000000..ee19a6c --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePartitionPrunerRule.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hive.ql.optimizer.optiq.rules;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
+import org.eigenbase.rel.FilterRelBase;
+import org.eigenbase.relopt.RelOptRule;
+import org.eigenbase.relopt.RelOptRuleCall;
+import org.eigenbase.rex.RexNode;
+import org.eigenbase.util.Pair;
+
+public class HivePartitionPrunerRule extends RelOptRule {
+
+  HiveConf conf;
+
+  public HivePartitionPrunerRule(HiveConf conf) {
+    super(operand(HiveFilterRel.class, operand(HiveTableScanRel.class, none())));
+    this.conf = conf;
+  }
+
+  @Override
+  public void onMatch(RelOptRuleCall call) {
+    HiveFilterRel filter = call.rel(0);
+    HiveTableScanRel tScan = call.rel(1);
+    perform(call, filter, tScan);
+  }
+
+  protected void perform(RelOptRuleCall call, FilterRelBase filter,
+      HiveTableScanRel tScan) {
+
+    RelOptHiveTable hiveTable = (RelOptHiveTable) tScan.getTable();
+    RexNode predicate = filter.getCondition();
+
+    Pair<RexNode, RexNode> predicates = PartitionPruner
+        .extractPartitionPredicates(filter.getCluster(), hiveTable, predicate);
+    RexNode partColExpr = predicates.left;
+    hiveTable.computePartitionList(conf, partColExpr);
+  }
+}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java
new file mode 100644
index 0000000..da0f7a4
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
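A worked example of the split that extractPartitionPredicates performs: conjuncts touching only partition columns feed computePartitionList, the rest stay in the filter. A dependency-free sketch with strings standing in for RexNodes; the containment check is a crude stand-in for the visitor walk:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class PartitionSplitSketch {
  public static void main(String[] args) {
    List<String> partCols = Arrays.asList("ds");
    List<String> conjuncts = Arrays.asList("ds = '2014-08-01'", "amount > 10");

    List<String> pruning = new ArrayList<String>();
    List<String> remaining = new ArrayList<String>();
    for (String c : conjuncts) {
      boolean partOnly = false;
      for (String p : partCols) {
        if (c.contains(p)) {   // crude stand-in for the RexVisitor walk
          partOnly = true;
        }
      }
      (partOnly ? pruning : remaining).add(c);
    }
    System.out.println(pruning);    // [ds = '2014-08-01'] -> computePartitionList
    System.out.println(remaining);  // [amount > 10] -> stays in the filter
  }
}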
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.rules; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; +import java.util.ListIterator; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.RelOptRule; +import org.eigenbase.relopt.RelOptRuleCall; +import org.eigenbase.relopt.RelOptRuleOperand; +import org.eigenbase.relopt.RelOptUtil; +import org.eigenbase.relopt.RelOptUtil.InputFinder; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; +import org.eigenbase.sql.SqlKind; +import org.eigenbase.util.Holder; + +public abstract class HivePushFilterPastJoinRule extends RelOptRule { + + public static final HivePushFilterPastJoinRule FILTER_ON_JOIN = new HivePushFilterPastJoinRule( + operand(FilterRelBase.class, operand(HiveJoinRel.class, any())), + "HivePushFilterPastJoinRule:filter", true) { + @Override + public void onMatch(RelOptRuleCall call) { + HiveFilterRel filter = call.rel(0); + HiveJoinRel join = call.rel(1); + perform(call, filter, join); + } + }; + + public static final HivePushFilterPastJoinRule JOIN = new HivePushFilterPastJoinRule( + operand(HiveJoinRel.class, any()), "HivePushFilterPastJoinRule:no-filter", false) { + @Override + public void onMatch(RelOptRuleCall call) { + HiveJoinRel join = call.rel(0); + perform(call, null, join); + } + }; + + /** Whether to try to strengthen join-type. */ + private final boolean smart; + + // ~ Constructors ----------------------------------------------------------- + + /** + * Creates a PushFilterPastJoinRule with an explicit root operand. + */ + private HivePushFilterPastJoinRule(RelOptRuleOperand operand, String id, boolean smart) { + super(operand, "PushFilterRule: " + id); + this.smart = smart; + } + + // ~ Methods ---------------------------------------------------------------- + + protected void perform(RelOptRuleCall call, FilterRelBase filter, + JoinRelBase join) { + final List joinFilters = RelOptUtil.conjunctions(join + .getCondition()); + + /* + * todo: hb 6/26/14 for left SemiJoin we cannot push predicates yet. The + * assertion that num(JoinRel columns) = num(leftSrc) + num(rightSrc) + * doesn't hold. So RelOptUtil.classifyFilters fails. + */ + if (((HiveJoinRel) join).isLeftSemiJoin()) { + return; + } + + if (filter == null) { + // There is only the joinRel + // make sure it does not match a cartesian product joinRel + // (with "true" condition) otherwise this rule will be applied + // again on the new cartesian product joinRel. + boolean onlyTrueFilter = true; + for (RexNode joinFilter : joinFilters) { + if (!joinFilter.isAlwaysTrue()) { + onlyTrueFilter = false; + break; + } + } + + if (onlyTrueFilter) { + return; + } + } + + final List aboveFilters = filter != null ? RelOptUtil + .conjunctions(filter.getCondition()) : new ArrayList(); + + List leftFilters = new ArrayList(); + List rightFilters = new ArrayList(); + int origJoinFiltersSz = joinFilters.size(); + + // TODO - add logic to derive additional filters. 
E.g., from + // (t1.a = 1 AND t2.a = 2) OR (t1.b = 3 AND t2.b = 4), you can + // derive table filters: + // (t1.a = 1 OR t1.b = 3) + // (t2.a = 2 OR t2.b = 4) + + // Try to push down above filters. These are typically where clause + // filters. They can be pushed down if they are not on the NULL + // generating side. + boolean filterPushed = false; + final Holder joinTypeHolder = Holder.of(join.getJoinType()); + if (RelOptUtil.classifyFilters(join, aboveFilters, + join.getJoinType(), true, !join.getJoinType().generatesNullsOnLeft(), !join.getJoinType() + .generatesNullsOnRight(), joinFilters, leftFilters, rightFilters, joinTypeHolder, smart)) { + filterPushed = true; + } + + /* + * Any predicates pushed down to joinFilters that aren't equality + * conditions: put them back as aboveFilters because Hive doesn't support + * not equi join conditions. + */ + ListIterator filterIter = joinFilters.listIterator(); + while (filterIter.hasNext()) { + RexNode exp = filterIter.next(); + if (exp instanceof RexCall) { + RexCall c = (RexCall) exp; + if (c.getOperator().getKind() == SqlKind.EQUALS) { + boolean validHiveJoinFilter = true; + for (RexNode rn : c.getOperands()) { + // NOTE: Hive dis-allows projections from both left & right side + // of join condition. Example: Hive disallows + // (r1.x=r2.x)=(r1.y=r2.y) on join condition. + if (filterRefersToBothSidesOfJoin(rn, join)) { + validHiveJoinFilter = false; + break; + } + } + if (validHiveJoinFilter) + continue; + } + } + aboveFilters.add(exp); + filterIter.remove(); + } + + /* + * if all pushed filters where put back then set filterPushed to false + */ + if (leftFilters.size() == 0 && rightFilters.size() == 0 + && joinFilters.size() == origJoinFiltersSz) { + filterPushed = false; + } + + // Try to push down filters in ON clause. A ON clause filter can only be + // pushed down if it does not affect the non-matching set, i.e. it is + // not on the side which is preserved. + if (RelOptUtil.classifyFilters(join, joinFilters, null, false, !join + .getJoinType().generatesNullsOnRight(), !join.getJoinType() + .generatesNullsOnLeft(), joinFilters, leftFilters, rightFilters, joinTypeHolder, false)) { + filterPushed = true; + } + + if (!filterPushed) { + return; + } + + /* + * Remove always true conditions that got pushed down. 
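The classifyFilters calls above encode a pushability rule worth spelling out: a WHERE-clause filter may be pushed to a join input only if that input is not null-generating for the given join type. A minimal sketch of that rule in isolation:

// Sketch of the pushability test applied via RelOptUtil.classifyFilters:
// WHERE-clause filters cannot be pushed to a null-generating input.
public class FilterPushSketch {
  enum JoinType { INNER, LEFT, RIGHT, FULL }

  static boolean canPushToLeft(JoinType jt) {
    return jt == JoinType.INNER || jt == JoinType.LEFT;   // left not null-generating
  }

  static boolean canPushToRight(JoinType jt) {
    return jt == JoinType.INNER || jt == JoinType.RIGHT;  // right not null-generating
  }

  public static void main(String[] args) {
    // t1 LEFT OUTER JOIN t2: a WHERE predicate on t1 columns is pushable,
    // one on t2 columns is not (t2 is the null-generating side).
    System.out.println(canPushToLeft(JoinType.LEFT));   // true
    System.out.println(canPushToRight(JoinType.LEFT));  // false
  }
}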
+ */ + removeAlwaysTruePredicates(leftFilters); + removeAlwaysTruePredicates(rightFilters); + removeAlwaysTruePredicates(joinFilters); + + // create FilterRels on top of the children if any filters were + // pushed to them + RexBuilder rexBuilder = join.getCluster().getRexBuilder(); + RelNode leftRel = createFilterOnRel(rexBuilder, join.getLeft(), leftFilters); + RelNode rightRel = createFilterOnRel(rexBuilder, join.getRight(), + rightFilters); + + // create the new join node referencing the new children and + // containing its new join filters (if there are any) + RexNode joinFilter; + + if (joinFilters.size() == 0) { + // if nothing actually got pushed and there is nothing leftover, + // then this rule is a no-op + if (leftFilters.isEmpty() + && rightFilters.isEmpty() + && joinTypeHolder.get() == join.getJoinType()) { + return; + } + joinFilter = rexBuilder.makeLiteral(true); + } else { + joinFilter = RexUtil.composeConjunction(rexBuilder, joinFilters, true); + } + RelNode newJoinRel = HiveJoinRel.getJoin(join.getCluster(), leftRel, + rightRel, joinFilter, join.getJoinType(), false); + + // create a FilterRel on top of the join if needed + RelNode newRel = createFilterOnRel(rexBuilder, newJoinRel, aboveFilters); + + call.transformTo(newRel); + } + + /** + * If the filter list passed in is non-empty, creates a FilterRel on top of + * the existing RelNode; otherwise, just returns the RelNode + * + * @param rexBuilder + * rex builder + * @param rel + * the RelNode that the filter will be put on top of + * @param filters + * list of filters + * @return new RelNode or existing one if no filters + */ + private RelNode createFilterOnRel(RexBuilder rexBuilder, RelNode rel, + List filters) { + RexNode andFilters = RexUtil.composeConjunction(rexBuilder, filters, false); + if (andFilters.isAlwaysTrue()) { + return rel; + } + return new HiveFilterRel(rel.getCluster(), rel.getCluster().traitSetOf( + HiveRel.CONVENTION), rel, andFilters); + } + + private void removeAlwaysTruePredicates(List predicates) { + + ListIterator iter = predicates.listIterator(); + while (iter.hasNext()) { + RexNode exp = iter.next(); + if (isAlwaysTrue(exp)) { + iter.remove(); + } + } + } + + private boolean isAlwaysTrue(RexNode predicate) { + if (predicate instanceof RexCall) { + RexCall c = (RexCall) predicate; + if (c.getOperator().getKind() == SqlKind.EQUALS) { + return isAlwaysTrue(c.getOperands().get(0)) + && isAlwaysTrue(c.getOperands().get(1)); + } + } + return predicate.isAlwaysTrue(); + } + + private boolean filterRefersToBothSidesOfJoin(RexNode filter, JoinRelBase j) { + boolean refersToBothSides = false; + + int joinNoOfProjects = j.getRowType().getFieldCount(); + BitSet filterProjs = new BitSet(joinNoOfProjects); + BitSet allLeftProjs = new BitSet(joinNoOfProjects); + BitSet allRightProjs = new BitSet(joinNoOfProjects); + allLeftProjs.set(0, j.getInput(0).getRowType().getFieldCount(), true); + allRightProjs.set(j.getInput(0).getRowType().getFieldCount(), joinNoOfProjects, true); + + InputFinder inputFinder = new InputFinder(filterProjs); + filter.accept(inputFinder); + + if (allLeftProjs.intersects(filterProjs) && allRightProjs.intersects(filterProjs)) + refersToBothSides = true; + + return refersToBothSides; + } +} + +// End PushFilterPastJoinRule.java + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java new file mode 100644 index 0000000..a218eca --- /dev/null +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/PartitionPruner.java @@ -0,0 +1,207 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.rules; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexLiteral; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexVisitorImpl; +import org.eigenbase.sql.fun.SqlStdOperatorTable; +import org.eigenbase.util.Pair; + +public class PartitionPruner { + + /** + * Breaks the predicate into 2 pieces. The first piece is the expressions that + * only contain partition columns and can be used for Partition Pruning; the + * second piece is the predicates that are left. + * + * @param cluster + * @param hiveTable + * @param predicate + * @return a Pair of expressions, each of which maybe null. The 1st predicate + * is expressions that only contain partition columns; the 2nd + * predicate contains the remaining predicates. 
+ */ + public static Pair extractPartitionPredicates( + RelOptCluster cluster, RelOptHiveTable hiveTable, RexNode predicate) { + RexNode partitionPruningPred = predicate + .accept(new ExtractPartPruningPredicate(cluster, hiveTable)); + RexNode remainingPred = predicate.accept(new ExtractRemainingPredicate( + cluster, partitionPruningPred)); + return new Pair(partitionPruningPred, remainingPred); + } + + public static class ExtractPartPruningPredicate extends + RexVisitorImpl { + + final RelOptHiveTable hiveTable; + final RelDataType rType; + final Set partCols; + final RelOptCluster cluster; + + public ExtractPartPruningPredicate(RelOptCluster cluster, + RelOptHiveTable hiveTable) { + super(true); + this.hiveTable = hiveTable; + rType = hiveTable.getRowType(); + List pfs = hiveTable.getHiveTableMD().getPartCols(); + partCols = new HashSet(); + for (FieldSchema pf : pfs) { + partCols.add(pf.getName()); + } + this.cluster = cluster; + } + + @Override + public RexNode visitLiteral(RexLiteral literal) { + return literal; + } + + @Override + public RexNode visitInputRef(RexInputRef inputRef) { + RelDataTypeField f = rType.getFieldList().get(inputRef.getIndex()); + if (partCols.contains(f.getName())) { + return inputRef; + } else { + return null; + } + } + + @Override + public RexNode visitCall(RexCall call) { + if (!deep) { + return null; + } + + List args = new LinkedList(); + boolean argsPruned = false; + + GenericUDF hiveUDF = SqlFunctionConverter.getHiveUDF(call.getOperator(), + call.getType()); + if (hiveUDF != null && + !FunctionRegistry.isDeterministic(hiveUDF)) { + return null; + } + + for (RexNode operand : call.operands) { + RexNode n = operand.accept(this); + if (n != null) { + args.add(n); + } else { + argsPruned = true; + } + } + + if (call.getOperator() != SqlStdOperatorTable.AND) { + return argsPruned ? 
null : call; + } else { + if (args.size() == 0) { + return null; + } else if (args.size() == 1) { + return args.get(0); + } else { + return cluster.getRexBuilder().makeCall(call.getOperator(), args); + } + } + } + + } + + public static class ExtractRemainingPredicate extends RexVisitorImpl { + + List pruningPredicates; + final RelOptCluster cluster; + + public ExtractRemainingPredicate(RelOptCluster cluster, + RexNode partPruningExpr) { + super(true); + this.cluster = cluster; + pruningPredicates = new ArrayList(); + flattenPredicates(partPruningExpr); + } + + private void flattenPredicates(RexNode r) { + if (r instanceof RexCall + && ((RexCall) r).getOperator() == SqlStdOperatorTable.AND) { + for (RexNode c : ((RexCall) r).getOperands()) { + flattenPredicates(c); + } + } else { + pruningPredicates.add(r); + } + } + + @Override + public RexNode visitLiteral(RexLiteral literal) { + return literal; + } + + @Override + public RexNode visitInputRef(RexInputRef inputRef) { + return inputRef; + } + + @Override + public RexNode visitCall(RexCall call) { + if (!deep) { + return null; + } + + if (call.getOperator() != SqlStdOperatorTable.AND) { + if (pruningPredicates.contains(call)) { + return null; + } else { + return call; + } + } + + List args = new LinkedList(); + + for (RexNode operand : call.operands) { + RexNode n = operand.accept(this); + if (n != null) { + args.add(n); + } + } + + if (args.size() == 0) { + return null; + } else if (args.size() == 1) { + return args.get(0); + } else { + return cluster.getRexBuilder().makeCall(call.getOperator(), args); + } + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java new file mode 100644 index 0000000..7ebe652 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java @@ -0,0 +1,229 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hive.ql.optimizer.optiq.stats;
+
+import java.util.BitSet;
+
+import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel;
+import org.eigenbase.rel.FilterRelBase;
+import org.eigenbase.rel.ProjectRelBase;
+import org.eigenbase.rel.RelNode;
+import org.eigenbase.rel.metadata.RelMetadataQuery;
+import org.eigenbase.relopt.RelOptUtil;
+import org.eigenbase.relopt.RelOptUtil.InputReferencedVisitor;
+import org.eigenbase.rex.RexCall;
+import org.eigenbase.rex.RexInputRef;
+import org.eigenbase.rex.RexNode;
+import org.eigenbase.rex.RexVisitorImpl;
+import org.eigenbase.sql.SqlKind;
+
+public class FilterSelectivityEstimator extends RexVisitorImpl<Double> {
+  private final RelNode m_childRel;
+  private final double m_childCardinality;
+
+  protected FilterSelectivityEstimator(RelNode childRel) {
+    super(true);
+    m_childRel = childRel;
+    m_childCardinality = RelMetadataQuery.getRowCount(m_childRel);
+  }
+
+  public Double estimateSelectivity(RexNode predicate) {
+    return predicate.accept(this);
+  }
+
+  public Double visitCall(RexCall call) {
+    if (!deep) {
+      return 1.0;
+    }
+
+    /*
+     * Ignore any predicates on partition columns because we have already
+     * accounted for these in the Table row count.
+     */
+    if (isPartitionPredicate(call, m_childRel)) {
+      return 1.0;
+    }
+
+    Double selectivity = null;
+    SqlKind op = call.getKind();
+
+    switch (op) {
+    case AND: {
+      selectivity = computeConjunctionSelectivity(call);
+      break;
+    }
+
+    case OR: {
+      selectivity = computeDisjunctionSelectivity(call);
+      break;
+    }
+
+    case NOT_EQUALS: {
+      selectivity = computeNotEqualitySelectivity(call);
+      break;
+    }
+
+    case LESS_THAN_OR_EQUAL:
+    case GREATER_THAN_OR_EQUAL:
+    case LESS_THAN:
+    case GREATER_THAN: {
+      selectivity = ((double) 1 / (double) 3);
+      break;
+    }
+
+    case IN: {
+      selectivity = ((double) 1 / ((double) call.operands.size()));
+      break;
+    }
+
+    default:
+      selectivity = computeFunctionSelectivity(call);
+    }
+
+    return selectivity;
+  }
+
+  /**
+   * NDV of "f1(x, y, z) != f2(p, q, r)" ->
+   * "(maxNDV(x,y,z,p,q,r) - 1)/maxNDV(x,y,z,p,q,r)".
+   *
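A worked example of how the branch selectivities above combine under a conjunction. The NDV of 100 for the != comparison is an assumed figure:

public class SelectivitySketch {
  public static void main(String[] args) {
    double range = 1.0 / 3.0;                    // <, <=, >, >= branches
    double maxNdv = 100.0;                       // assumed NDV of the compared columns
    double notEquals = (maxNdv - 1.0) / maxNdv;  // != branch: (NDV - 1) / NDV
    System.out.println(range * notEquals);       // AND multiplies: ~0.33
  }
}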
+ * <p>
+ * + * @param call + * @return + */ + private Double computeNotEqualitySelectivity(RexCall call) { + double tmpNDV = getMaxNDV(call); + + if (tmpNDV > 1) + return (tmpNDV - (double) 1) / tmpNDV; + else + return 1.0; + } + + /** + * Selectivity of f(X,y,z) -> 1/maxNDV(x,y,z). + *
+ * <p>
+   * Note that >, >=, <, <=, = ... are considered generic functions and use
+   * this method to find their selectivity.
+   *
+   * @param call
+   * @return
+   */
+  private Double computeFunctionSelectivity(RexCall call) {
+    return 1 / getMaxNDV(call);
+  }
+
+  /**
+   * Disjunction Selectivity -> 1 - (1 - m1/n)(1 - m2/n), where n is the total
+   * number of tuples from the child and m1 and m2 are the expected numbers of
+   * tuples from each part of the disjunction predicate.
+   *
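A quick numeric check of the disjunction formula: with branch selectivities 1/3 and 1/4, the OR is expected to keep half of the child's rows.

public class DisjunctionSketch {
  public static void main(String[] args) {
    double n = 1200;                 // child row count
    double m1 = n / 3;               // rows matching branch 1
    double m2 = n / 4;               // rows matching branch 2
    double sel = 1 - (1 - m1 / n) * (1 - m2 / n);
    System.out.println(sel);         // 0.5
  }
}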
+ * <p>
+ * Note we compute m1. m2.. by applying selectivity of the disjunctive element + * on the cardinality from child. + * + * @param call + * @return + */ + private Double computeDisjunctionSelectivity(RexCall call) { + Double tmpCardinality; + Double tmpSelectivity; + double selectivity = 1; + + for (RexNode dje : call.getOperands()) { + tmpSelectivity = dje.accept(this); + if (tmpSelectivity == null) { + tmpSelectivity = 0.99; + } + tmpCardinality = m_childCardinality * tmpSelectivity; + + if (tmpCardinality > 1) + tmpSelectivity = (1 - tmpCardinality / m_childCardinality); + else + tmpSelectivity = 1.0; + + selectivity *= tmpSelectivity; + } + + if (selectivity > 1) + return (1 - selectivity); + else + return 1.0; + } + + /** + * Selectivity of conjunctive predicate -> (selectivity of conjunctive + * element1) * (selectivity of conjunctive element2)... + * + * @param call + * @return + */ + private Double computeConjunctionSelectivity(RexCall call) { + Double tmpSelectivity; + double selectivity = 1; + + for (RexNode cje : call.getOperands()) { + tmpSelectivity = cje.accept(this); + if (tmpSelectivity != null) { + selectivity *= tmpSelectivity; + } + } + + return selectivity; + } + + private Double getMaxNDV(RexCall call) { + double tmpNDV; + double maxNDV = 1.0; + InputReferencedVisitor irv; + + for (RexNode op : call.getOperands()) { + if (op instanceof RexInputRef) { + tmpNDV = HiveRelMdDistinctRowCount.getDistinctRowCount(m_childRel, + ((RexInputRef) op).getIndex()); + if (tmpNDV > maxNDV) + maxNDV = tmpNDV; + } else { + irv = new InputReferencedVisitor(); + irv.apply(op); + for (Integer childProjIndx : irv.inputPosReferenced) { + tmpNDV = HiveRelMdDistinctRowCount.getDistinctRowCount(m_childRel, childProjIndx); + if (tmpNDV > maxNDV) + maxNDV = tmpNDV; + } + } + } + + return maxNDV; + } + + private boolean isPartitionPredicate(RexNode expr, RelNode r) { + if ( r instanceof ProjectRelBase ) { + expr = RelOptUtil.pushFilterPastProject(expr, (ProjectRelBase) r); + return isPartitionPredicate(expr, ((ProjectRelBase) r).getChild()); + } else if ( r instanceof FilterRelBase ) { + return isPartitionPredicate(expr, ((FilterRelBase) r).getChild()); + } else if ( r instanceof HiveTableScanRel ) { + RelOptHiveTable table = (RelOptHiveTable) + ((HiveTableScanRel)r).getTable(); + BitSet cols = RelOptUtil.InputFinder.bits(expr); + return table.containsPartitionColumnsOnly(cols); + } + return false; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdDistinctRowCount.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdDistinctRowCount.java new file mode 100644 index 0000000..4be57b1 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdDistinctRowCount.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.stats; + +import java.util.BitSet; +import java.util.List; + +import net.hydromatic.optiq.BuiltinMethod; + +import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveCost; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.apache.hadoop.hive.ql.plan.ColStatistics; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.metadata.ChainedRelMetadataProvider; +import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider; +import org.eigenbase.rel.metadata.RelMdDistinctRowCount; +import org.eigenbase.rel.metadata.RelMdUtil; +import org.eigenbase.rel.metadata.RelMetadataProvider; +import org.eigenbase.rel.metadata.RelMetadataQuery; +import org.eigenbase.relopt.RelOptCost; +import org.eigenbase.rex.RexNode; + +import com.google.common.collect.ImmutableList; + +public class HiveRelMdDistinctRowCount extends RelMdDistinctRowCount { + + private static final HiveRelMdDistinctRowCount INSTANCE = + new HiveRelMdDistinctRowCount(); + + public static final RelMetadataProvider SOURCE = ChainedRelMetadataProvider + .of(ImmutableList.of( + + ReflectiveRelMetadataProvider.reflectiveSource( + BuiltinMethod.DISTINCT_ROW_COUNT.method, INSTANCE), + + ReflectiveRelMetadataProvider.reflectiveSource( + BuiltinMethod.CUMULATIVE_COST.method, INSTANCE))); + + private HiveRelMdDistinctRowCount() { + } + + // Catch-all rule when none of the others apply. + @Override + public Double getDistinctRowCount(RelNode rel, BitSet groupKey, + RexNode predicate) { + if (rel instanceof HiveTableScanRel) { + return getDistinctRowCount((HiveTableScanRel) rel, groupKey, predicate); + } + /* + * For now use Optiq's default formulas for propagating NDVs up the Query + * Tree. + */ + return super.getDistinctRowCount(rel, groupKey, predicate); + } + + private Double getDistinctRowCount(HiveTableScanRel htRel, BitSet groupKey, + RexNode predicate) { + List<Integer> projIndxLst = HiveOptiqUtil + .translateBitSetToProjIndx(groupKey); + List<ColStatistics> colStats = htRel.getColStat(projIndxLst); + Double noDistinctRows = 1.0; + for (ColStatistics cStat : colStats) { + noDistinctRows *= cStat.getCountDistint(); + } + + return Math.min(noDistinctRows, htRel.getRows()); + } + + public static Double getDistinctRowCount(RelNode r, int indx) { + BitSet bitSetOfRqdProj = new BitSet(); + bitSetOfRqdProj.set(indx); + return RelMetadataQuery.getDistinctRowCount(r, bitSetOfRqdProj, r + .getCluster().getRexBuilder().makeLiteral(true)); + } + + @Override + public Double getDistinctRowCount(JoinRelBase rel, BitSet groupKey, + RexNode predicate) { + if (rel instanceof HiveJoinRel) { + HiveJoinRel hjRel = (HiveJoinRel) rel; + //TODO: Improve this + if (hjRel.isLeftSemiJoin()) { + return RelMetadataQuery.getDistinctRowCount(hjRel.getLeft(), groupKey, + rel.getCluster().getRexBuilder().makeLiteral(true)); + } else { + return RelMdUtil.getJoinDistinctRowCount(rel, rel.getJoinType(), + groupKey, predicate, true); + } + } + + return RelMetadataQuery.getDistinctRowCount(rel, groupKey, predicate); + } + + /* + * Favor Broad Plans over Deep Plans.
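A quick numeric sketch of the table-scan NDV rule in getDistinctRowCount above (the statistics are invented; only the arithmetic is shown):

```java
// NDV of a multi-column group key = product of per-column NDVs,
// capped at the table's row count.
double rows = 200.0;
double[] colNdvs = { 10.0, 50.0 };   // assumed stats for the grouped columns
double ndv = 1.0;
for (double d : colNdvs) {
  ndv *= d;                          // 10 * 50 = 500
}
ndv = Math.min(ndv, rows);           // 200: a key can't occur more often than rows
```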
+ */ + public RelOptCost getCumulativeCost(HiveJoinRel rel) { + RelOptCost cost = RelMetadataQuery.getNonCumulativeCost(rel); + List inputs = rel.getInputs(); + RelOptCost maxICost = HiveCost.ZERO; + for (RelNode input : inputs) { + RelOptCost iCost = RelMetadataQuery.getCumulativeCost(input); + if (maxICost.isLt(iCost)) { + maxICost = iCost; + } + } + return cost.plus(maxICost); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdRowCount.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdRowCount.java new file mode 100644 index 0000000..949eb19 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdRowCount.java @@ -0,0 +1,418 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.optimizer.optiq.stats; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.List; +import java.util.Set; + +import net.hydromatic.optiq.BuiltinMethod; +import net.hydromatic.optiq.util.BitSets; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.ProjectRelBase; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.RelVisitor; +import org.eigenbase.rel.TableAccessRelBase; +import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider; +import org.eigenbase.rel.metadata.RelMdRowCount; +import org.eigenbase.rel.metadata.RelMetadataProvider; +import org.eigenbase.rel.metadata.RelMetadataQuery; +import org.eigenbase.rel.rules.SemiJoinRel; +import org.eigenbase.relopt.RelOptUtil; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; +import org.eigenbase.sql.fun.SqlStdOperatorTable; +import org.eigenbase.util.Holder; +import org.eigenbase.util.Pair; + +public class HiveRelMdRowCount extends RelMdRowCount { + + protected static final Log LOG = LogFactory.getLog(HiveRelMdRowCount.class.getName()); + + + public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider + .reflectiveSource(BuiltinMethod.ROW_COUNT.method, new HiveRelMdRowCount()); + + protected HiveRelMdRowCount() { + super(); + } + + public Double getRowCount(JoinRelBase join) { + PKFKRelationInfo pkfk = analyzeJoinForPKFK(join); + if (pkfk != null) { + double selectivity = (pkfk.pkInfo.selectivity * pkfk.ndvScalingFactor); + selectivity = Math.min(1.0, selectivity); + if (LOG.isDebugEnabled()) { + LOG.debug("Identified Primary - Foreign Key relation:"); + LOG.debug(RelOptUtil.toString(join)); + LOG.debug(pkfk); + } + return 
pkfk.fkInfo.rowCount * selectivity; + } + return join.getRows(); + } + + public Double getRowCount(SemiJoinRel rel) { + PKFKRelationInfo pkfk = analyzeJoinForPKFK(rel); + if (pkfk != null) { + double selectivity = (pkfk.pkInfo.selectivity * pkfk.ndvScalingFactor); + selectivity = Math.min(1.0, selectivity); + if (LOG.isDebugEnabled()) { + LOG.debug("Identified Primary - Foreign Key relation:"); + LOG.debug(RelOptUtil.toString(rel)); + LOG.debug(pkfk); + } + return pkfk.fkInfo.rowCount * selectivity; + } + return super.getRowCount(rel); + } + + static class PKFKRelationInfo { + public final int fkSide; + public final double ndvScalingFactor; + public final FKSideInfo fkInfo; + public final PKSideInfo pkInfo; + public final boolean isPKSideSimple; + + PKFKRelationInfo(int fkSide, + FKSideInfo fkInfo, + PKSideInfo pkInfo, + double ndvScalingFactor, + boolean isPKSideSimple) { + this.fkSide = fkSide; + this.fkInfo = fkInfo; + this.pkInfo = pkInfo; + this.ndvScalingFactor = ndvScalingFactor; + this.isPKSideSimple = isPKSideSimple; + } + + public String toString() { + return String.format( + "Primary - Foreign Key join:\n\tfkSide = %d\n\tFKInfo:%s\n" + + "\tPKInfo:%s\n\tisPKSideSimple:%s\n\tNDV Scaling Factor:%.2f\n", + fkSide, + fkInfo, + pkInfo, + isPKSideSimple, + ndvScalingFactor); + } + } + + static class FKSideInfo { + public final double rowCount; + public final double distinctCount; + public FKSideInfo(double rowCount, double distinctCount) { + this.rowCount = rowCount; + this.distinctCount = distinctCount; + } + + public String toString() { + return String.format("FKInfo(rowCount=%.2f,ndv=%.2f)", rowCount, distinctCount); + } + } + + static class PKSideInfo extends FKSideInfo { + public final double selectivity; + public PKSideInfo(double rowCount, double distinctCount, double selectivity) { + super(rowCount, distinctCount); + this.selectivity = selectivity; + } + + public String toString() { + return String.format("PKInfo(rowCount=%.2f,ndv=%.2f,selectivity=%.2f)", rowCount, distinctCount,selectivity); + } + } + + /* + * For T1 join T2 on T1.x = T2.y if we identify 'y' as a key of T2 then we can + * infer the join cardinality as: rowCount(T1) * selectivity(T2) i.e. this is + * like a SemiJoin where the T1(Fact side/FK side) is filtered by a factor + * based on the Selectivity of the PK/Dim table side. + * + * 1. If both T1.x and T2.y are keys then use the larger one as the PK side. + * 2. In case of outer Joins: a) The FK side should be the Null Preserving + * side. It doesn't make sense to apply this heuristic in case of Dim loj Fact + * or Fact roj Dim b) The selectivity factor applied on the Fact Table should + * be 1. + */ + public static PKFKRelationInfo analyzeJoinForPKFK(JoinRelBase joinRel) { + + RelNode left = joinRel.getInputs().get(0); + RelNode right = joinRel.getInputs().get(1); + + final List<RexNode> initJoinFilters = RelOptUtil.conjunctions(joinRel + .getCondition()); + + /* + * No joining condition. + */ + if (initJoinFilters.isEmpty()) { + return null; + } + + List<RexNode> leftFilters = new ArrayList<RexNode>(); + List<RexNode> rightFilters = new ArrayList<RexNode>(); + List<RexNode> joinFilters = new ArrayList<RexNode>(initJoinFilters); + final Holder<JoinRelType> joinTypeHolder = Holder.of(joinRel.getJoinType()); + + // @todo: remove this. 8/28/14 hb + // for now adding because RelOptUtil.classifyFilters has an assertion about + // column counts that is not true for semiJoins.
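A sketch of the resulting estimate in getRowCount above, with invented, TPC-DS-flavored numbers (see the NDV-scaling discussion that follows in the source):

```java
// join cardinality ~= rowCount(FK side) * selectivity(PK side) * ndvScalingFactor
double fkRows = 1_000_000;         // fact / FK side, e.g. store_sales
double pkSelectivity = 1.0 / 20;   // dim filter keeps 1 of the dim's 20 years
double pkNdv = 7300, fkNdv = 1825; // dim spans 20 years of dates, fact only 5
double ndvScalingFactor = pkNdv / fkNdv;                      // 4.0
double sel = Math.min(1.0, pkSelectivity * ndvScalingFactor); // 0.2
double joinRows = fkRows * sel;    // 200,000 instead of a naive 50,000
```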
+ if (joinRel instanceof SemiJoinRel) { + return null; + } + + RelOptUtil.classifyFilters(joinRel, joinFilters, joinRel.getJoinType(), + false, !joinRel.getJoinType().generatesNullsOnRight(), !joinRel + .getJoinType().generatesNullsOnLeft(), joinFilters, leftFilters, + rightFilters, joinTypeHolder, false); + + Pair<Integer, Integer> joinCols = canHandleJoin(joinRel, leftFilters, + rightFilters, joinFilters); + if (joinCols == null) { + return null; + } + int leftColIdx = joinCols.left; + int rightColIdx = joinCols.right; + + RexBuilder rexBuilder = joinRel.getCluster().getRexBuilder(); + RexNode leftPred = RexUtil + .composeConjunction(rexBuilder, leftFilters, true); + RexNode rightPred = RexUtil.composeConjunction(rexBuilder, rightFilters, + true); + BitSet lBitSet = BitSets.of(leftColIdx); + BitSet rBitSet = BitSets.of(rightColIdx); + + /* + * If the form is Dim loj F or Fact roj Dim or Dim semij Fact then return + * null. + */ + boolean leftIsKey = (joinRel.getJoinType() == JoinRelType.INNER || joinRel + .getJoinType() == JoinRelType.RIGHT) + && !(joinRel instanceof SemiJoinRel) && isKey(lBitSet, left); + boolean rightIsKey = (joinRel.getJoinType() == JoinRelType.INNER || joinRel + .getJoinType() == JoinRelType.LEFT) && isKey(rBitSet, right); + + if (!leftIsKey && !rightIsKey) { + return null; + } + + double leftRowCount = RelMetadataQuery.getRowCount(left); + double rightRowCount = RelMetadataQuery.getRowCount(right); + + if (leftIsKey && rightIsKey) { + // keep the larger side as the PK side + if (rightRowCount > leftRowCount) { + leftIsKey = false; + } + } + + int pkSide = leftIsKey ? 0 : rightIsKey ? 1 : -1; + + boolean isPKSideSimpleTree = pkSide != -1 ? + IsSimpleTreeOnJoinKey.check( + pkSide == 0 ? left : right, + pkSide == 0 ? leftColIdx : rightColIdx) : false; + + double leftNDV = isPKSideSimpleTree ? RelMetadataQuery.getDistinctRowCount(left, lBitSet, leftPred) : -1; + double rightNDV = isPKSideSimpleTree ? RelMetadataQuery.getDistinctRowCount(right, rBitSet, rightPred) : -1; + + /* + * If the NDVs of the PK and FK sides don't match, and the PK side is a filter + * on the Key column then scale the NDV on the FK side. + * + * As described by Peter Boncz: http://databasearchitects.blogspot.com/ + * in such cases we can be off by a large margin in the Join cardinality + * estimate. The example he provides is the join of StoreSales and DateDim + * on the TPCDS dataset. Since the DateDim is populated for 20 years into + * the future, while the StoreSales only has 5 years worth of data, there + * are 40 times fewer distinct dates in StoreSales. + * + * In general it is hard to infer the range for the foreign key on an + * arbitrary expression. For example, the NDV for DayofWeek is the same + * irrespective of NDV on the number of unique days, whereas the + * NDV of Quarters has the same ratio as the NDV on the keys. + * + * But for expressions that apply only on columns that have the same NDV + * as the key (implying that they are alternate keys) we can apply the + * ratio. So in the case of StoreSales - DateDim joins for predicate on the + * d_date column we can apply the scaling factor. + */ + double ndvScalingFactor = 1.0; + if ( isPKSideSimpleTree ) { + ndvScalingFactor = pkSide == 0 ? leftNDV/rightNDV : rightNDV / leftNDV; + } + + if (pkSide == 0) { + FKSideInfo fkInfo = new FKSideInfo(rightRowCount, + rightNDV); + PKSideInfo pkInfo = new PKSideInfo(leftRowCount, + leftNDV, + joinRel.getJoinType().generatesNullsOnRight() ? 1.0 : + isPKSideSimpleTree ?
RelMetadataQuery.getSelectivity(left, leftPred) : 1.0); + + return new PKFKRelationInfo(1, fkInfo, pkInfo, ndvScalingFactor, isPKSideSimpleTree); + } + + if (pkSide == 1) { + FKSideInfo fkInfo = new FKSideInfo(leftRowCount, + leftNDV); + PKSideInfo pkInfo = new PKSideInfo(rightRowCount, + rightNDV, + joinRel.getJoinType().generatesNullsOnLeft() ? 1.0 : + isPKSideSimpleTree ? RelMetadataQuery.getSelectivity(right, rightPred) : 1.0); + + // the FK side is the left input (0) when the PK is on the right + return new PKFKRelationInfo(0, fkInfo, pkInfo, ndvScalingFactor, isPKSideSimpleTree); + } + + return null; + } + + private static boolean isKey(BitSet c, RelNode rel) { + boolean isKey = false; + Set<BitSet> keys = RelMetadataQuery.getUniqueKeys(rel); + if (keys != null) { + for (BitSet key : keys) { + if (key.equals(c)) { + isKey = true; + break; + } + } + } + return isKey; + } + + /* + * 1. Join condition must be an Equality Predicate. + * 2. both sides must reference 1 column. + * 3. If needed flip the columns. + */ + private static Pair<Integer, Integer> canHandleJoin(JoinRelBase joinRel, + List<RexNode> leftFilters, List<RexNode> rightFilters, + List<RexNode> joinFilters) { + + /* + * If after classifying filters there is more than 1 joining predicate, we + * don't handle this. Return null. + */ + if (joinFilters.size() != 1) { + return null; + } + + RexNode joinCond = joinFilters.get(0); + + int leftColIdx; + int rightColIdx; + + if (!(joinCond instanceof RexCall)) { + return null; + } + + if (((RexCall) joinCond).getOperator() != SqlStdOperatorTable.EQUALS) { + return null; + } + + BitSet leftCols = RelOptUtil.InputFinder.bits(((RexCall) joinCond).getOperands().get(0)); + BitSet rightCols = RelOptUtil.InputFinder.bits(((RexCall) joinCond).getOperands().get(1)); + + if (leftCols.cardinality() != 1 || rightCols.cardinality() != 1 ) { + return null; + } + + int nFieldsLeft = joinRel.getLeft().getRowType().getFieldList().size(); + int nFieldsRight = joinRel.getRight().getRowType().getFieldList().size(); + int nSysFields = joinRel.getSystemFieldList().size(); + BitSet rightFieldsBitSet = BitSets.range(nSysFields + nFieldsLeft, + nSysFields + nFieldsLeft + nFieldsRight); + /* + * flip column references if join condition specified in reverse order to + * join sources.
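To make the index bookkeeping in canHandleJoin concrete, a small worked example (the field counts and condition are hypothetical):

```java
// Left input has 3 fields, right input has 2, no system fields, and the
// join condition arrived as $4 = $1 (the right-side column mentioned first).
int nSysFields = 0, nFieldsLeft = 3, nFieldsRight = 2;
// rightFieldsBitSet covers indexes [3, 5); $4 falls inside it, so the two
// bitsets are swapped before the offsets are subtracted:
int leftColIdx  = 1 - nSysFields;                  // field 1 of the left input
int rightColIdx = 4 - (nSysFields + nFieldsLeft);  // field 1 of the right input
```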
+ */ + if (BitSets.contains(rightFieldsBitSet, leftCols)) { + BitSet t = leftCols; + leftCols = rightCols; + rightCols = t; + } + + leftColIdx = leftCols.nextSetBit(0) - nSysFields; + rightColIdx = rightCols.nextSetBit(0) - (nSysFields + nFieldsLeft); + + return new Pair(leftColIdx, rightColIdx); + } + + private static class IsSimpleTreeOnJoinKey extends RelVisitor { + + int joinKey; + boolean simpleTree; + + static boolean check(RelNode r, int joinKey) { + IsSimpleTreeOnJoinKey v = new IsSimpleTreeOnJoinKey(joinKey); + v.go(r); + return v.simpleTree; + } + + IsSimpleTreeOnJoinKey(int joinKey) { + super(); + this.joinKey = joinKey; + simpleTree = true; + } + + @Override + public void visit(RelNode node, int ordinal, RelNode parent) { + + if (node instanceof TableAccessRelBase) { + simpleTree = true; + } else if (node instanceof ProjectRelBase) { + simpleTree = isSimple((ProjectRelBase) node); + } else if (node instanceof FilterRelBase) { + simpleTree = isSimple((FilterRelBase) node); + } else { + simpleTree = false; + } + + if (simpleTree) { + super.visit(node, ordinal, parent); + } + } + + private boolean isSimple(ProjectRelBase project) { + RexNode r = project.getProjects().get(joinKey); + if (r instanceof RexInputRef) { + joinKey = ((RexInputRef) r).getIndex(); + return true; + } + return false; + } + + private boolean isSimple(FilterRelBase filter) { + BitSet condBits = RelOptUtil.InputFinder.bits(filter.getCondition()); + return isKey(condBits, filter); + } + + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdSelectivity.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdSelectivity.java new file mode 100644 index 0000000..5d9b145 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdSelectivity.java @@ -0,0 +1,228 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
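Before moving on, a sketch of which plan shapes IsSimpleTreeOnJoinKey above accepts (the plans are hypothetical, shown as comments):

```java
// Accepted: the join key traces through bare column references down to a scan.
//   Project[$0] -> Filter[d_date = '...'] -> TableScan[date_dim]
//
// Rejected: the projected join key is a computed expression, not a RexInputRef.
//   Project[$0 + 1] -> TableScan[date_dim]
//
// Also rejected: any other operator (join, aggregate, union) on the path, or
// a Filter whose referenced columns do not form a unique key of its input.
```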
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.stats; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import net.hydromatic.optiq.BuiltinMethod; + +import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil.JoinLeafPredicateInfo; +import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil.JoinPredicateInfo; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider; +import org.eigenbase.rel.metadata.RelMdSelectivity; +import org.eigenbase.rel.metadata.RelMdUtil; +import org.eigenbase.rel.metadata.RelMetadataProvider; +import org.eigenbase.rel.metadata.RelMetadataQuery; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; + +import com.google.common.collect.ImmutableMap; + +public class HiveRelMdSelectivity extends RelMdSelectivity { + public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider.reflectiveSource( + BuiltinMethod.SELECTIVITY.method, + new HiveRelMdSelectivity()); + + protected HiveRelMdSelectivity() { + super(); + } + + public Double getSelectivity(HiveTableScanRel t, RexNode predicate) { + if (predicate != null) { + FilterSelectivityEstimator filterSelEstmator = new FilterSelectivityEstimator(t); + return filterSelEstmator.estimateSelectivity(predicate); + } + + return 1.0; + } + + public Double getSelectivity(HiveJoinRel j, RexNode predicate) { + if (j.getJoinType().equals(JoinRelType.INNER)) { + return computeInnerJoinSelectivity(j, predicate); + } + return 1.0; + } + + private Double computeInnerJoinSelectivity(HiveJoinRel j, RexNode predicate) { + double ndvCrossProduct = 1; + RexNode combinedPredicate = getCombinedPredicateForJoin(j, predicate); + JoinPredicateInfo jpi = JoinPredicateInfo.constructJoinPredicateInfo(j, + combinedPredicate); + ImmutableMap.Builder colStatMapBuilder = ImmutableMap + .builder(); + ImmutableMap colStatMap; + int rightOffSet = j.getLeft().getRowType().getFieldCount(); + + // 1. Update Col Stats Map with col stats for columns from left side of + // Join which are part of join keys + for (Integer ljk : jpi.getProjsFromLeftPartOfJoinKeysInChildSchema()) { + colStatMapBuilder.put(ljk, + HiveRelMdDistinctRowCount.getDistinctRowCount(j.getLeft(), ljk)); + } + + // 2. Update Col Stats Map with col stats for columns from right side of + // Join which are part of join keys + for (Integer rjk : jpi.getProjsFromRightPartOfJoinKeysInChildSchema()) { + colStatMapBuilder.put(rjk + rightOffSet, + HiveRelMdDistinctRowCount.getDistinctRowCount(j.getRight(), rjk)); + } + colStatMap = colStatMapBuilder.build(); + + // 3. Walk through the Join Condition Building NDV for selectivity + // NDV of the join can not exceed the cardinality of cross join. + List peLst = jpi.getEquiJoinPredicateElements(); + int noOfPE = peLst.size(); + if (noOfPE > 0) { + ndvCrossProduct = exponentialBackoff(peLst, colStatMap); + + if (j.isLeftSemiJoin()) + ndvCrossProduct = Math.min(RelMetadataQuery.getRowCount(j.getLeft()), + ndvCrossProduct); + else + ndvCrossProduct = Math.min(RelMetadataQuery.getRowCount(j.getLeft()) + * RelMetadataQuery.getRowCount(j.getRight()), ndvCrossProduct); + } + + // 4. 
Join Selectivity = 1/NDV + return (1 / ndvCrossProduct); + } + + // 3.2 if there is more than one conjunctive predicate element, then walk + // through them one by one. Compute the cross product of NDVs. The cross + // product is computed by multiplying the largest NDV of all of the + // conjunctive predicate elements with the degraded NDVs of the rest of the + // conjunctive predicate elements. NDV is degraded using a log function. + // Finally the ndvCrossProduct is fenced at the join cross product to + // ensure that NDV cannot exceed the worst case join cardinality.
+ // NDV of a conjunctive predicate element is the max NDV of all arguments + // to lhs, rhs expressions. + // NDV(JoinCondition) = min (left cardinality * right cardinality, + // ndvCrossProduct(JoinCondition)) + // ndvCrossProduct(JoinCondition) = ndv(pex)*log(ndv(pe1))*log(ndv(pe2)) + // where pex is the predicate element of join condition with max ndv. + // ndv(pe) = max(NDV(left.Expr), NDV(right.Expr)) + // NDV(expr) = max(NDV( expr args)) + protected double logSmoothing(List<JoinLeafPredicateInfo> peLst, ImmutableMap<Integer, Double> colStatMap) { + int noOfPE = peLst.size(); + double ndvCrossProduct = getMaxNDVForJoinSelectivity(peLst.get(0), colStatMap); + if (noOfPE > 1) { + double maxNDVSoFar = ndvCrossProduct; + double ndvToBeSmoothed; + double tmpNDV; + + for (int i = 1; i < noOfPE; i++) { + tmpNDV = getMaxNDVForJoinSelectivity(peLst.get(i), colStatMap); + if (tmpNDV > maxNDVSoFar) { + ndvToBeSmoothed = maxNDVSoFar; + maxNDVSoFar = tmpNDV; + ndvCrossProduct = (ndvCrossProduct / ndvToBeSmoothed) * tmpNDV; + } else { + ndvToBeSmoothed = tmpNDV; + } + // TODO: revisit the fence + if (ndvToBeSmoothed > 3) + ndvCrossProduct *= Math.log(ndvToBeSmoothed); + else + ndvCrossProduct *= ndvToBeSmoothed; + } + } + return ndvCrossProduct; + } + + /* + * a) Order predicates by ndv in descending order. b) ndvCrossProduct = + * ndv(pe0) * ndv(pe1) ^(1/2) * ndv(pe2) ^(1/4) * ndv(pe3) ^(1/8) ... + */ + protected double exponentialBackoff(List<JoinLeafPredicateInfo> peLst, + ImmutableMap<Integer, Double> colStatMap) { + int noOfPE = peLst.size(); + List<Double> ndvs = new ArrayList<Double>(noOfPE); + for (int i = 0; i < noOfPE; i++) { + ndvs.add(getMaxNDVForJoinSelectivity(peLst.get(i), colStatMap)); + } + Collections.sort(ndvs); + Collections.reverse(ndvs); + double ndvCrossProduct = 1.0; + for (int i = 0; i < ndvs.size(); i++) { + double n = Math.pow(ndvs.get(i), Math.pow(1 / 2.0, i)); + ndvCrossProduct *= n; + } + return ndvCrossProduct; + } + + private RexNode getCombinedPredicateForJoin(HiveJoinRel j, RexNode additionalPredicate) { + RexNode minusPred = RelMdUtil.minusPreds(j.getCluster().getRexBuilder(), additionalPredicate, + j.getCondition()); + + if (minusPred != null) { + List<RexNode> minusList = new ArrayList<RexNode>(); + minusList.add(j.getCondition()); + minusList.add(minusPred); + + return RexUtil.composeConjunction(j.getCluster().getRexBuilder(), minusList, true); + } + + return j.getCondition(); + } + + /** + * Compute Max NDV to determine Join Selectivity.
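A worked example of exponentialBackoff above, with invented per-predicate NDVs:

```java
// ndv(pe0) * ndv(pe1)^(1/2) * ndv(pe2)^(1/4) on NDVs sorted descending:
double[] ndvs = { 1000.0, 100.0, 10.0 };
double ndvCrossProduct = 1.0;
for (int i = 0; i < ndvs.length; i++) {
  ndvCrossProduct *= Math.pow(ndvs[i], Math.pow(0.5, i));
}
// 1000 * 100^0.5 * 10^0.25 ~= 1000 * 10 * 1.78 ~= 17,800 -- far below the
// naive 1,000,000, so multi-key joins are not penalized as if every join
// key were fully independent.
```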
+ * + * @param jlpi + * @param colStatMap + * Immutable Map of Projection Index (in Join Schema) to Column Stat + * @return + */ + private static Double getMaxNDVForJoinSelectivity(JoinLeafPredicateInfo jlpi, + ImmutableMap<Integer, Double> colStatMap) { + Double maxNDVSoFar = 1.0; + + maxNDVSoFar = getMaxNDVFromProjections(colStatMap, + jlpi.getProjsFromLeftPartOfJoinKeysInJoinSchema(), maxNDVSoFar); + maxNDVSoFar = getMaxNDVFromProjections(colStatMap, + jlpi.getProjsFromRightPartOfJoinKeysInJoinSchema(), maxNDVSoFar); + + return maxNDVSoFar; + } + + private static Double getMaxNDVFromProjections(Map<Integer, Double> colStatMap, + Set<Integer> projectionSet, Double defaultMaxNDV) { + Double colNDV = null; + Double maxNDVSoFar = defaultMaxNDV; + + for (Integer projIndx : projectionSet) { + colNDV = colStatMap.get(projIndx); + if (colNDV > maxNDVSoFar) + maxNDVSoFar = colNDV; + } + + return maxNDVSoFar; + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdUniqueKeys.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdUniqueKeys.java new file mode 100644 index 0000000..06ff584 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdUniqueKeys.java @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.optimizer.optiq.stats; + +import java.util.BitSet; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import net.hydromatic.optiq.BuiltinMethod; +import net.hydromatic.optiq.util.BitSets; + +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.apache.hadoop.hive.ql.plan.ColStatistics; +import org.eigenbase.rel.ProjectRelBase; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.metadata.BuiltInMetadata; +import org.eigenbase.rel.metadata.Metadata; +import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider; +import org.eigenbase.rel.metadata.RelMdUniqueKeys; +import org.eigenbase.rel.metadata.RelMetadataProvider; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexNode; + +import com.google.common.base.Function; + +public class HiveRelMdUniqueKeys { + + public static final RelMetadataProvider SOURCE = ReflectiveRelMetadataProvider + .reflectiveSource(BuiltinMethod.UNIQUE_KEYS.method, + new HiveRelMdUniqueKeys()); + + /* + * Infer Uniqueness if: - rowCount(col) = ndv(col) - TBD for numerics: max(col) + * - min(col) = rowCount(col) + * + * Why are we intercepting ProjectRelBase and not TableScan? Because if we + * have a method for TableScan, it will not know which columns to check for. + * Inferring Uniqueness for all columns is very expensive right now.
The flip + * side of doing this is, it only works post Field Trimming. + */ + public Set getUniqueKeys(ProjectRelBase rel, boolean ignoreNulls) { + + RelNode child = rel.getChild(); + + if (!(child instanceof HiveTableScanRel)) { + Function fn = RelMdUniqueKeys.SOURCE.apply( + rel.getClass(), BuiltInMetadata.UniqueKeys.class); + return ((BuiltInMetadata.UniqueKeys) fn.apply(rel)) + .getUniqueKeys(ignoreNulls); + } + + HiveTableScanRel tScan = (HiveTableScanRel) child; + Map posMap = new HashMap(); + int projectPos = 0; + int colStatsPos = 0; + + BitSet projectedCols = new BitSet(); + for (RexNode r : rel.getProjects()) { + if (r instanceof RexInputRef) { + projectedCols.set(((RexInputRef) r).getIndex()); + posMap.put(colStatsPos, projectPos); + colStatsPos++; + } + projectPos++; + } + + double numRows = tScan.getRows(); + List colStats = tScan.getColStat(BitSets + .toList(projectedCols)); + Set keys = new HashSet(); + + colStatsPos = 0; + for (ColStatistics cStat : colStats) { + boolean isKey = false; + if (cStat.getCountDistint() >= numRows) { + isKey = true; + } + if ( !isKey && cStat.getRange() != null && + cStat.getRange().maxValue != null && + cStat.getRange().minValue != null) { + double r = cStat.getRange().maxValue.doubleValue() - + cStat.getRange().minValue.doubleValue() + 1; + isKey = (numRows == r); + } + if ( isKey ) { + BitSet key = new BitSet(); + key.set(posMap.get(colStatsPos)); + keys.add(key); + } + colStatsPos++; + } + + return keys; + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTBuilder.java new file mode 100644 index 0000000..98723a3 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTBuilder.java @@ -0,0 +1,236 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
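Restating the two uniqueness tests in getUniqueKeys above with invented statistics:

```java
double numRows = 1000.0;
// Test 1: the column's NDV reaches the row count (estimates may overshoot,
// hence >= rather than ==).
double countDistinct = 1000.0;                    // cStat.getCountDistint()
boolean keyByNdv = countDistinct >= numRows;      // true -> column is a key
// Test 2, numeric columns only: a dense value range implies no duplicates.
double min = 1.0, max = 1000.0;
boolean keyByRange = (max - min + 1 == numRows);  // true -> column is a key
```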
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Calendar; + +import net.hydromatic.avatica.ByteString; + +import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.ParseDriver; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.TableAccessRelBase; +import org.eigenbase.rex.RexLiteral; +import org.eigenbase.sql.type.SqlTypeName; + +class ASTBuilder { + + static ASTBuilder construct(int tokenType, String text) { + ASTBuilder b = new ASTBuilder(); + b.curr = createAST(tokenType, text); + return b; + } + + static ASTNode createAST(int tokenType, String text) { + return (ASTNode) ParseDriver.adaptor.create(tokenType, text); + } + + static ASTNode destNode() { + return ASTBuilder + .construct(HiveParser.TOK_DESTINATION, "TOK_DESTINATION") + .add( + ASTBuilder.construct(HiveParser.TOK_DIR, "TOK_DIR").add(HiveParser.TOK_TMP_FILE, + "TOK_TMP_FILE")).node(); + } + + static ASTNode table(TableAccessRelBase scan) { + RelOptHiveTable hTbl = (RelOptHiveTable) scan.getTable(); + ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_TABREF, "TOK_TABREF").add( + ASTBuilder.construct(HiveParser.TOK_TABNAME, "TOK_TABNAME") + .add(HiveParser.Identifier, hTbl.getHiveTableMD().getDbName()) + .add(HiveParser.Identifier, hTbl.getHiveTableMD().getTableName())); + + // NOTE: Optiq considers tbls to be equal if their names are the same. Hence + // we need to provide Optiq the fully qualified table name (dbname.tblname) + // and not the user provided aliases. + // However in HIVE DB name can not appear in select list; in case of join + // where table names differ only in DB name, Hive would require user + // introducing explicit aliases for tbl. 
+ b.add(HiveParser.Identifier, hTbl.getTableAlias()); + return b.node(); + } + + static ASTNode join(ASTNode left, ASTNode right, JoinRelType joinType, ASTNode cond, + boolean semiJoin) { + ASTBuilder b = null; + + switch (joinType) { + case INNER: + if (semiJoin) { + b = ASTBuilder.construct(HiveParser.TOK_LEFTSEMIJOIN, "TOK_LEFTSEMIJOIN"); + } else { + b = ASTBuilder.construct(HiveParser.TOK_JOIN, "TOK_JOIN"); + } + break; + case LEFT: + b = ASTBuilder.construct(HiveParser.TOK_LEFTOUTERJOIN, "TOK_LEFTOUTERJOIN"); + break; + case RIGHT: + b = ASTBuilder.construct(HiveParser.TOK_RIGHTOUTERJOIN, "TOK_RIGHTOUTERJOIN"); + break; + case FULL: + b = ASTBuilder.construct(HiveParser.TOK_FULLOUTERJOIN, "TOK_FULLOUTERJOIN"); + break; + } + + b.add(left).add(right).add(cond); + return b.node(); + } + + static ASTNode subQuery(ASTNode qry, String alias) { + return ASTBuilder.construct(HiveParser.TOK_SUBQUERY, "TOK_SUBQUERY").add(qry) + .add(HiveParser.Identifier, alias).node(); + } + + static ASTNode qualifiedName(String tableName, String colName) { + ASTBuilder b = ASTBuilder + .construct(HiveParser.DOT, ".") + .add( + ASTBuilder.construct(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL").add( + HiveParser.Identifier, tableName)).add(HiveParser.Identifier, colName); + return b.node(); + } + + static ASTNode unqualifiedName(String colName) { + ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_TABLE_OR_COL, "TOK_TABLE_OR_COL").add( + HiveParser.Identifier, colName); + return b.node(); + } + + static ASTNode where(ASTNode cond) { + return ASTBuilder.construct(HiveParser.TOK_WHERE, "TOK_WHERE").add(cond).node(); + } + + static ASTNode having(ASTNode cond) { + return ASTBuilder.construct(HiveParser.TOK_HAVING, "TOK_HAVING").add(cond).node(); + } + + static ASTNode limit(Object value) { + return ASTBuilder.construct(HiveParser.TOK_LIMIT, "TOK_LIMIT") + .add(HiveParser.Number, value.toString()).node(); + } + + static ASTNode selectExpr(ASTNode expr, String alias) { + return ASTBuilder.construct(HiveParser.TOK_SELEXPR, "TOK_SELEXPR").add(expr) + .add(HiveParser.Identifier, alias).node(); + } + + static ASTNode literal(RexLiteral literal) { + Object val = null; + int type = 0; + SqlTypeName sqlType = literal.getType().getSqlTypeName(); + + switch (sqlType) { + case BINARY: + ByteString bs = (ByteString) literal.getValue(); + val = bs.byteAt(0); + type = HiveParser.BigintLiteral; + break; + case TINYINT: + val = literal.getValue3(); + type = HiveParser.TinyintLiteral; + break; + case SMALLINT: + val = literal.getValue3(); + type = HiveParser.SmallintLiteral; + break; + case INTEGER: + case BIGINT: + val = literal.getValue3(); + type = HiveParser.BigintLiteral; + break; + case DOUBLE: + val = literal.getValue3() + "D"; + type = HiveParser.Number; + break; + case DECIMAL: + val = literal.getValue3() + "BD"; + type = HiveParser.DecimalLiteral; + break; + case FLOAT: + case REAL: + val = literal.getValue3(); + type = HiveParser.Number; + break; + case VARCHAR: + case CHAR: + val = literal.getValue3(); + String escapedVal = BaseSemanticAnalyzer.escapeSQLString(String.valueOf(val)); + type = HiveParser.StringLiteral; + val = "'" + escapedVal + "'"; + break; + case BOOLEAN: + val = literal.getValue3(); + type = ((Boolean) val).booleanValue() ? 
HiveParser.KW_TRUE : HiveParser.KW_FALSE; + break; + case DATE: { + val = literal.getValue(); + type = HiveParser.TOK_DATELITERAL; + DateFormat df = new SimpleDateFormat("yyyy-MM-dd"); + val = df.format(((Calendar) val).getTime()); + val = "'" + val + "'"; + } + break; + case TIME: + case TIMESTAMP: { + val = literal.getValue(); + type = HiveParser.TOK_TIMESTAMP; + DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + val = df.format(((Calendar) val).getTime()); + val = "'" + val + "'"; + } + break; + case NULL: + type = HiveParser.TOK_NULL; + break; + + default: + throw new RuntimeException("Unsupported Type: " + sqlType); + } + + return (ASTNode) ParseDriver.adaptor.create(type, String.valueOf(val)); + } + + ASTNode curr; + + ASTNode node() { + return curr; + } + + ASTBuilder add(int tokenType, String text) { + ParseDriver.adaptor.addChild(curr, createAST(tokenType, text)); + return this; + } + + ASTBuilder add(ASTBuilder b) { + ParseDriver.adaptor.addChild(curr, b.curr); + return this; + } + + ASTBuilder add(ASTNode n) { + if (n != null) { + ParseDriver.adaptor.addChild(curr, n); + } + return this; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java new file mode 100644 index 0000000..f5a704f --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java @@ -0,0 +1,660 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
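Before ASTConverter, a small hypothetical use of the ASTBuilder fluent API above; the choice of HiveParser.EQUAL as the token for "=" is an assumption for illustration:

```java
// Builds the AST fragment for: WHERE t.c = 1
ASTNode col = ASTBuilder.qualifiedName("t", "c");
ASTNode lit = ASTBuilder.construct(HiveParser.Number, "1").node();
ASTNode cond = ASTBuilder.construct(HiveParser.EQUAL, "=")  // assumed token
    .add(col).add(lit).node();
ASTNode where = ASTBuilder.where(cond);
// yields: TOK_WHERE ( = ( . (TOK_TABLE_OR_COL t) c ) 1 )
```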
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.math.BigDecimal; +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import net.hydromatic.optiq.util.BitSets; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException; +import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveSortRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter.HiveToken; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.ParseDriver; +import org.eigenbase.rel.AggregateCall; +import org.eigenbase.rel.AggregateRelBase; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.ProjectRelBase; +import org.eigenbase.rel.RelFieldCollation; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.RelVisitor; +import org.eigenbase.rel.SortRel; +import org.eigenbase.rel.TableAccessRelBase; +import org.eigenbase.rel.UnionRelBase; +import org.eigenbase.rel.rules.SemiJoinRel; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexFieldAccess; +import org.eigenbase.rex.RexFieldCollation; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexLiteral; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexOver; +import org.eigenbase.rex.RexVisitorImpl; +import org.eigenbase.rex.RexWindow; +import org.eigenbase.rex.RexWindowBound; +import org.eigenbase.sql.SqlKind; +import org.eigenbase.sql.SqlOperator; +import org.eigenbase.sql.type.SqlTypeName; + +import com.google.common.collect.Iterables; + +public class ASTConverter { + + private RelNode root; + private HiveAST hiveAST; + private RelNode from; + private FilterRelBase where; + private AggregateRelBase groupBy; + private FilterRelBase having; + private ProjectRelBase select; + private SortRel order; + private SortRel limit; + + private Schema schema; + + private long derivedTableCount; + + ASTConverter(RelNode root, long dtCounterInitVal) { + this.root = root; + hiveAST = new HiveAST(); + this.derivedTableCount = dtCounterInitVal; + } + + public static ASTNode convert(final RelNode relNode, List resultSchema) + throws OptiqSemanticException { + RelNode root = PlanModifierForASTConv.convertOpTree(relNode, resultSchema); + ASTConverter c = new ASTConverter(root, 0); + return c.convert(); + } + + private ASTNode convert() { + /* + * 1. Walk RelNode Graph; note from, where, gBy.. nodes. + */ + new QBVisitor().go(root); + + /* + * 2. convert from node. + */ + QueryBlockInfo qb = convertSource(from); + schema = qb.schema; + hiveAST.from = ASTBuilder.construct(HiveParser.TOK_FROM, "TOK_FROM").add(qb.ast).node(); + + /* + * 3. convert filterNode + */ + if (where != null) { + ASTNode cond = where.getCondition().accept(new RexVisitor(schema)); + hiveAST.where = ASTBuilder.where(cond); + } + + /* + * 4. 
GBy + */ + if (groupBy != null) { + ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_GROUPBY, "TOK_GROUPBY"); + for (int i : BitSets.toIter(groupBy.getGroupSet())) { + RexInputRef iRef = new RexInputRef(i, groupBy.getCluster().getTypeFactory() + .createSqlType(SqlTypeName.ANY)); + b.add(iRef.accept(new RexVisitor(schema))); + } + + if (!groupBy.getGroupSet().isEmpty()) + hiveAST.groupBy = b.node(); + schema = new Schema(schema, groupBy); + } + + /* + * 5. Having + */ + if (having != null) { + ASTNode cond = having.getCondition().accept(new RexVisitor(schema)); + hiveAST.having = ASTBuilder.having(cond); + } + + /* + * 6. Project + */ + ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_SELECT, "TOK_SELECT"); + + if (select.getChildExps().isEmpty()) { + RexLiteral r = select.getCluster().getRexBuilder().makeExactLiteral(new BigDecimal(1)); + ASTNode selectExpr = ASTBuilder.selectExpr(ASTBuilder.literal(r), "1"); + b.add(selectExpr); + } else { + int i = 0; + + for (RexNode r : select.getChildExps()) { + ASTNode selectExpr = ASTBuilder.selectExpr(r.accept(new RexVisitor(schema)), select + .getRowType().getFieldNames().get(i++)); + b.add(selectExpr); + } + } + hiveAST.select = b.node(); + + /* + * 7. Order Use in Order By from the block above. RelNode has no pointer to + * parent hence we need to go top down; but OB at each block really belong + * to its src/from. Hence the need to pass in sortRel for each block from + * its parent. + */ + convertOBToASTNode((HiveSortRel) order); + + // 8. Limit + convertLimitToASTNode((HiveSortRel) limit); + + return hiveAST.getAST(); + } + + private void convertLimitToASTNode(HiveSortRel limit) { + if (limit != null) { + HiveSortRel hiveLimit = (HiveSortRel) limit; + RexNode limitExpr = hiveLimit.getFetchExpr(); + if (limitExpr != null) { + Object val = ((RexLiteral) limitExpr).getValue2(); + hiveAST.limit = ASTBuilder.limit(val); + } + } + } + + private void convertOBToASTNode(HiveSortRel order) { + if (order != null) { + HiveSortRel hiveSort = (HiveSortRel) order; + if (!hiveSort.getCollation().getFieldCollations().isEmpty()) { + // 1 Add order by token + ASTNode orderAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY"); + + schema = new Schema((HiveSortRel) hiveSort); + Map obRefToCallMap = hiveSort.getInputRefToCallMap(); + RexNode obExpr; + ASTNode astCol; + for (RelFieldCollation c : hiveSort.getCollation().getFieldCollations()) { + + // 2 Add Direction token + ASTNode directionAST = c.getDirection() == RelFieldCollation.Direction.ASCENDING ? ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") : ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC"); + + // 3 Convert OB expr (OB Expr is usually an input ref except for top + // level OB; top level OB will have RexCall kept in a map.) + obExpr = null; + if (obRefToCallMap != null) + obExpr = obRefToCallMap.get(c.getFieldIndex()); + + if (obExpr != null) { + astCol = obExpr.accept(new RexVisitor(schema)); + } else { + ColumnInfo cI = schema.get(c.getFieldIndex()); + /* + * The RowResolver setup for Select drops Table associations. So + * setup ASTNode on unqualified name. 
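The clause slots that steps 1-8 of convert() fill can be visualized with a hypothetical plan (the query and operator names are invented; the where/having split follows the QBVisitor logic shown next):

```java
// SELECT c1, sum(c2) FROM t WHERE c3 > 0 GROUP BY c1 HAVING sum(c2) > 10
//
//   Project(c1, sum_c2)        -> select   (step 6)
//     Filter(sum_c2 > 10)      -> having   (filter above an Aggregate)
//       Aggregate{c1; sum(c2)} -> groupBy  (step 4)
//         Filter(c3 > 0)       -> where    (filter below the Aggregate)
//           TableScan(t)       -> from     (step 2)
```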
+ */ + astCol = ASTBuilder.unqualifiedName(cI.column); + } + + // 4 buildup the ob expr AST + directionAST.addChild(astCol); + orderAst.addChild(directionAST); + } + hiveAST.order = orderAst; + } + } + } + + private Schema getRowSchema(String tblAlias) { + return new Schema(select, tblAlias); + } + + private QueryBlockInfo convertSource(RelNode r) { + Schema s; + ASTNode ast; + + if (r instanceof TableAccessRelBase) { + TableAccessRelBase f = (TableAccessRelBase) r; + s = new Schema(f); + ast = ASTBuilder.table(f); + } else if (r instanceof JoinRelBase) { + JoinRelBase join = (JoinRelBase) r; + QueryBlockInfo left = convertSource(join.getLeft()); + QueryBlockInfo right = convertSource(join.getRight()); + s = new Schema(left.schema, right.schema); + ASTNode cond = join.getCondition().accept(new RexVisitor(s)); + boolean semiJoin = join instanceof SemiJoinRel; + ast = ASTBuilder.join(left.ast, right.ast, join.getJoinType(), cond, semiJoin); + if (semiJoin) + s = left.schema; + } else if (r instanceof UnionRelBase) { + RelNode leftInput = ((UnionRelBase) r).getInput(0); + RelNode rightInput = ((UnionRelBase) r).getInput(1); + + ASTConverter leftConv = new ASTConverter(leftInput, this.derivedTableCount); + ASTConverter rightConv = new ASTConverter(rightInput, this.derivedTableCount); + ASTNode leftAST = leftConv.convert(); + ASTNode rightAST = rightConv.convert(); + + ASTNode unionAST = getUnionAllAST(leftAST, rightAST); + + String sqAlias = nextAlias(); + ast = ASTBuilder.subQuery(unionAST, sqAlias); + s = new Schema((UnionRelBase) r, sqAlias); + } else { + ASTConverter src = new ASTConverter(r, this.derivedTableCount); + ASTNode srcAST = src.convert(); + String sqAlias = nextAlias(); + s = src.getRowSchema(sqAlias); + ast = ASTBuilder.subQuery(srcAST, sqAlias); + } + return new QueryBlockInfo(s, ast); + } + + class QBVisitor extends RelVisitor { + + public void handle(FilterRelBase filter) { + RelNode child = filter.getChild(); + if (child instanceof AggregateRelBase && !((AggregateRelBase) child).getGroupSet().isEmpty()) { + ASTConverter.this.having = filter; + } else { + ASTConverter.this.where = filter; + } + } + + public void handle(ProjectRelBase project) { + if (ASTConverter.this.select == null) { + ASTConverter.this.select = project; + } else { + ASTConverter.this.from = project; + } + } + + @Override + public void visit(RelNode node, int ordinal, RelNode parent) { + + if (node instanceof TableAccessRelBase) { + ASTConverter.this.from = node; + } else if (node instanceof FilterRelBase) { + handle((FilterRelBase) node); + } else if (node instanceof ProjectRelBase) { + handle((ProjectRelBase) node); + } else if (node instanceof JoinRelBase) { + ASTConverter.this.from = node; + } else if (node instanceof UnionRelBase) { + ASTConverter.this.from = node; + } else if (node instanceof AggregateRelBase) { + ASTConverter.this.groupBy = (AggregateRelBase) node; + } else if (node instanceof SortRel) { + if (ASTConverter.this.select != null) { + ASTConverter.this.from = node; + } else { + SortRel hiveSortRel = (SortRel) node; + if (hiveSortRel.getCollation().getFieldCollations().isEmpty()) + ASTConverter.this.limit = hiveSortRel; + else + ASTConverter.this.order = hiveSortRel; + } + } + /* + * once the source node is reached; stop traversal for this QB + */ + if (ASTConverter.this.from == null) { + node.childrenAccept(this); + } + } + + } + + static class RexVisitor extends RexVisitorImpl { + + private final Schema schema; + + protected RexVisitor(Schema schema) { + super(true); + this.schema = 
schema; + } + + @Override + public ASTNode visitFieldAccess(RexFieldAccess fieldAccess) { + return ASTBuilder.construct(HiveParser.DOT, ".").add(super.visitFieldAccess(fieldAccess)) + .add(HiveParser.Identifier, fieldAccess.getField().getName()).node(); + } + + @Override + public ASTNode visitInputRef(RexInputRef inputRef) { + ColumnInfo cI = schema.get(inputRef.getIndex()); + if (cI.agg != null) { + return (ASTNode) ParseDriver.adaptor.dupTree(cI.agg); + } + + if (cI.table == null || cI.table.isEmpty()) + return ASTBuilder.unqualifiedName(cI.column); + else + return ASTBuilder.qualifiedName(cI.table, cI.column); + + } + + @Override + public ASTNode visitLiteral(RexLiteral literal) { + return ASTBuilder.literal(literal); + } + + private ASTNode getPSpecAST(RexWindow window) { + ASTNode pSpecAst = null; + + ASTNode dByAst = null; + if (window.partitionKeys != null && !window.partitionKeys.isEmpty()) { + dByAst = ASTBuilder.createAST(HiveParser.TOK_DISTRIBUTEBY, "TOK_DISTRIBUTEBY"); + for (RexNode pk : window.partitionKeys) { + ASTNode astCol = pk.accept(this); + dByAst.addChild(astCol); + } + } + + ASTNode oByAst = null; + if (window.orderKeys != null && !window.orderKeys.isEmpty()) { + oByAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY"); + for (RexFieldCollation ok : window.orderKeys) { + ASTNode astNode = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") : ASTBuilder + .createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC"); + ASTNode astCol = ok.left.accept(this); + astNode.addChild(astCol); + oByAst.addChild(astNode); + } + } + + if (dByAst != null || oByAst != null) { + pSpecAst = ASTBuilder.createAST(HiveParser.TOK_PARTITIONINGSPEC, "TOK_PARTITIONINGSPEC"); + if (dByAst != null) + pSpecAst.addChild(dByAst); + if (oByAst != null) + pSpecAst.addChild(oByAst); + } + + return pSpecAst; + } + + private ASTNode getWindowBound(RexWindowBound wb) { + ASTNode wbAST = null; + + if (wb.isCurrentRow()) { + wbAST = ASTBuilder.createAST(HiveParser.KW_CURRENT, "CURRENT"); + } else { + if (wb.isPreceding()) + wbAST = ASTBuilder.createAST(HiveParser.KW_PRECEDING, "PRECEDING"); + else + wbAST = ASTBuilder.createAST(HiveParser.KW_FOLLOWING, "FOLLOWING"); + if (wb.isUnbounded()) { + wbAST.addChild(ASTBuilder.createAST(HiveParser.KW_UNBOUNDED, "UNBOUNDED")); + } else { + ASTNode offset = wb.getOffset().accept(this); + wbAST.addChild(offset); + } + } + + return wbAST; + } + + private ASTNode getWindowRangeAST(RexWindow window) { + ASTNode wRangeAst = null; + + ASTNode startAST = null; + RexWindowBound ub = window.getUpperBound(); + if (ub != null) { + startAST = getWindowBound(ub); + } + + ASTNode endAST = null; + RexWindowBound lb = window.getLowerBound(); + if (lb != null) { + endAST = getWindowBound(lb); + } + + if (startAST != null || endAST != null) { + // NOTE: in Hive AST Rows->Range(Physical) & Range -> Values (logical) + if (window.isRows()) + wRangeAst = ASTBuilder.createAST(HiveParser.TOK_WINDOWRANGE, "TOK_WINDOWRANGE"); + else + wRangeAst = ASTBuilder.createAST(HiveParser.TOK_WINDOWVALUES, "TOK_WINDOWVALUES"); + if (startAST != null) + wRangeAst.addChild(startAST); + if (endAST != null) + wRangeAst.addChild(endAST); + } + + return wRangeAst; + } + + @Override + public ASTNode visitOver(RexOver over) { + if (!deep) { + return null; + } + + // 1. Translate the UDAF + final ASTNode wUDAFAst = visitCall(over); + + // 2. 
Add TOK_WINDOW as child of UDAF + ASTNode wSpec = ASTBuilder.createAST(HiveParser.TOK_WINDOWSPEC, "TOK_WINDOWSPEC"); + wUDAFAst.addChild(wSpec); + + // 3. Add Part Spec & Range Spec as child of TOK_WINDOW + final RexWindow window = over.getWindow(); + final ASTNode wPSpecAst = getPSpecAST(window); + final ASTNode wRangeAst = getWindowRangeAST(window); + if (wPSpecAst != null) + wSpec.addChild(wPSpecAst); + if (wRangeAst != null) + wSpec.addChild(wRangeAst); + + return wUDAFAst; + } + + @Override + public ASTNode visitCall(RexCall call) { + if (!deep) { + return null; + } + + SqlOperator op = call.getOperator(); + List astNodeLst = new LinkedList(); + if (op.kind == SqlKind.CAST) { + HiveToken ht = TypeConverter.hiveToken(call.getType()); + ASTBuilder astBldr = ASTBuilder.construct(ht.type, ht.text); + if (ht.args != null) { + for (String castArg : ht.args) + astBldr.add(HiveParser.Identifier, castArg); + } + astNodeLst.add(astBldr.node()); + } + + for (RexNode operand : call.operands) { + astNodeLst.add(operand.accept(this)); + } + + if (isFlat(call)) + return SqlFunctionConverter.buildAST(op, astNodeLst, 0); + else + return SqlFunctionConverter.buildAST(op, astNodeLst); + } + } + + static class QueryBlockInfo { + Schema schema; + ASTNode ast; + + public QueryBlockInfo(Schema schema, ASTNode ast) { + super(); + this.schema = schema; + this.ast = ast; + } + } + + /* + * represents the schema exposed by a QueryBlock. + */ + static class Schema extends ArrayList { + + private static final long serialVersionUID = 1L; + + Schema(TableAccessRelBase scan) { + String tabName = ((RelOptHiveTable) scan.getTable()).getTableAlias(); + for (RelDataTypeField field : scan.getRowType().getFieldList()) { + add(new ColumnInfo(tabName, field.getName())); + } + } + + Schema(ProjectRelBase select, String alias) { + for (RelDataTypeField field : select.getRowType().getFieldList()) { + add(new ColumnInfo(alias, field.getName())); + } + } + + Schema(UnionRelBase unionRel, String alias) { + for (RelDataTypeField field : unionRel.getRowType().getFieldList()) { + add(new ColumnInfo(alias, field.getName())); + } + } + + @SuppressWarnings("unchecked") + Schema(Schema left, Schema right) { + for (ColumnInfo cI : Iterables.concat(left, right)) { + add(cI); + } + } + + Schema(Schema src, AggregateRelBase gBy) { + for (int i : BitSets.toIter(gBy.getGroupSet())) { + ColumnInfo cI = src.get(i); + add(cI); + } + List aggs = gBy.getAggCallList(); + for (AggregateCall agg : aggs) { + int argCount = agg.getArgList().size(); + ASTBuilder b = agg.isDistinct() ? ASTBuilder.construct(HiveParser.TOK_FUNCTIONDI, + "TOK_FUNCTIONDI") : argCount == 0 ? ASTBuilder.construct(HiveParser.TOK_FUNCTIONSTAR, + "TOK_FUNCTIONSTAR") : ASTBuilder.construct(HiveParser.TOK_FUNCTION, "TOK_FUNCTION"); + b.add(HiveParser.Identifier, agg.getAggregation().getName()); + for (int i : agg.getArgList()) { + RexInputRef iRef = new RexInputRef(i, gBy.getCluster().getTypeFactory() + .createSqlType(SqlTypeName.ANY)); + b.add(iRef.accept(new RexVisitor(src))); + } + add(new ColumnInfo(null, b.node())); + } + } + + /** + * Assumption:
+ * 1. ProjectRel will always be child of SortRel.
+ * 2. In Optiq every projection in ProjectRelBase is uniquely named + * (unambiguous) without using table qualifier (table name).
+ * + * @param order + * Hive Sort Rel Node + * @return Schema + */ + public Schema(HiveSortRel order) { + ProjectRelBase select = (ProjectRelBase) order.getChild(); + for (String projName : select.getRowType().getFieldNames()) { + add(new ColumnInfo(null, projName)); + } + } + } + + /* + * represents Column information exposed by a QueryBlock. + */ + static class ColumnInfo { + String table; + String column; + ASTNode agg; + + ColumnInfo(String table, String column) { + super(); + this.table = table; + this.column = column; + } + + ColumnInfo(String table, ASTNode agg) { + super(); + this.table = table; + this.agg = agg; + } + + ColumnInfo(String alias, ColumnInfo srcCol) { + this.table = alias; + this.column = srcCol.column; + this.agg = srcCol.agg; + } + } + + private String nextAlias() { + String tabAlias = String.format("$hdt$_%d", derivedTableCount); + derivedTableCount++; + return tabAlias; + } + + static class HiveAST { + + ASTNode from; + ASTNode where; + ASTNode groupBy; + ASTNode having; + ASTNode select; + ASTNode order; + ASTNode limit; + + public ASTNode getAST() { + ASTBuilder b = ASTBuilder + .construct(HiveParser.TOK_QUERY, "TOK_QUERY") + .add(from) + .add( + ASTBuilder.construct(HiveParser.TOK_INSERT, "TOK_INSERT").add(ASTBuilder.destNode()) + .add(select).add(where).add(groupBy).add(having).add(order).add(limit)); + return b.node(); + } + } + + public ASTNode getUnionAllAST(ASTNode leftAST, ASTNode rightAST) { + + ASTNode unionTokAST = ASTBuilder.construct(HiveParser.TOK_UNION, "TOK_UNION").add(leftAST) + .add(rightAST).node(); + + return unionTokAST; + } + + public static boolean isFlat(RexCall call) { + boolean flat = false; + if (call.operands != null && call.operands.size() > 2) { + SqlOperator op = call.getOperator(); + if (op.getKind() == SqlKind.AND || op.getKind() == SqlKind.OR) { + flat = true; + } + } + + return flat; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java new file mode 100644 index 0000000..e6e491f --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ExprNodeConverter.java @@ -0,0 +1,156 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.sql.Date; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.LinkedList; +import java.util.List; + +import org.apache.hadoop.hive.common.type.HiveChar; +import org.apache.hadoop.hive.common.type.HiveVarchar; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexLiteral; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexVisitorImpl; + +/* + * convert a RexNode to an ExprNodeDesc + */ +public class ExprNodeConverter extends RexVisitorImpl { + + RelDataType rType; + String tabAlias; + boolean partitioningExpr; + + public ExprNodeConverter(String tabAlias, RelDataType rType, boolean partitioningExpr) { + super(true); + /* + * hb: 6/25/14 for now we only support expressions that only contain + * partition cols. there is no use case for supporting generic expressions. + * for supporting generic exprs., we need to give the converter information + * on whether a column is a partition column or not, whether a column is a + * virtual column or not. + */ + assert partitioningExpr == true; + this.tabAlias = tabAlias; + this.rType = rType; + this.partitioningExpr = partitioningExpr; + } + + @Override + public ExprNodeDesc visitInputRef(RexInputRef inputRef) { + RelDataTypeField f = rType.getFieldList().get(inputRef.getIndex()); + return new ExprNodeColumnDesc(TypeConverter.convert(f.getType()), f.getName(), tabAlias, + partitioningExpr); + } + + @Override + public ExprNodeDesc visitCall(RexCall call) { + ExprNodeGenericFuncDesc gfDesc = null; + + if (!deep) { + return null; + } + + List args = new LinkedList(); + + for (RexNode operand : call.operands) { + args.add(operand.accept(this)); + } + + // If Expr is flat (and[p,q,r,s] or[p,q,r,s]) then recursively build the + // exprnode + if (ASTConverter.isFlat(call)) { + ArrayList tmpExprArgs = new ArrayList(); + tmpExprArgs.addAll(args.subList(0, 2)); + gfDesc = new ExprNodeGenericFuncDesc(TypeConverter.convert(call.getType()), + SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType()), tmpExprArgs); + for (int i = 2; i < call.operands.size(); i++) { + tmpExprArgs = new ArrayList(); + tmpExprArgs.add(gfDesc); + tmpExprArgs.add(args.get(i)); + gfDesc = new ExprNodeGenericFuncDesc(TypeConverter.convert(call.getType()), + SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType()), tmpExprArgs); + } + } else { + gfDesc = new ExprNodeGenericFuncDesc(TypeConverter.convert(call.getType()), + SqlFunctionConverter.getHiveUDF(call.getOperator(), call.getType()), args); + } + + return gfDesc; + } + + @Override + public ExprNodeDesc visitLiteral(RexLiteral literal) { + RelDataType lType = literal.getType(); + + switch (literal.getType().getSqlTypeName()) { + case BOOLEAN: + return new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, Boolean.valueOf(RexLiteral + .booleanValue(literal))); + case TINYINT: + return new ExprNodeConstantDesc(TypeInfoFactory.byteTypeInfo, Byte.valueOf(((Number) literal + .getValue3()).byteValue())); + case SMALLINT: + return new 
ExprNodeConstantDesc(TypeInfoFactory.shortTypeInfo, + Short.valueOf(((Number) literal.getValue3()).shortValue())); + case INTEGER: + return new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, + Integer.valueOf(((Number) literal.getValue3()).intValue())); + case BIGINT: + return new ExprNodeConstantDesc(TypeInfoFactory.longTypeInfo, Long.valueOf(((Number) literal + .getValue3()).longValue())); + case FLOAT: + return new ExprNodeConstantDesc(TypeInfoFactory.floatTypeInfo, + Float.valueOf(((Number) literal.getValue3()).floatValue())); + case DOUBLE: + return new ExprNodeConstantDesc(TypeInfoFactory.doubleTypeInfo, + Double.valueOf(((Number) literal.getValue3()).doubleValue())); + case DATE: + return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo, + new Date(((Calendar)literal.getValue()).getTimeInMillis())); + case TIMESTAMP: + return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo, literal.getValue3()); + case BINARY: + return new ExprNodeConstantDesc(TypeInfoFactory.binaryTypeInfo, literal.getValue3()); + case DECIMAL: + return new ExprNodeConstantDesc(TypeInfoFactory.getDecimalTypeInfo(lType.getPrecision(), + lType.getScale()), literal.getValue3()); + case VARCHAR: + return new ExprNodeConstantDesc(TypeInfoFactory.getVarcharTypeInfo(lType.getPrecision()), + new HiveVarchar((String) literal.getValue3(), lType.getPrecision())); + case CHAR: + return new ExprNodeConstantDesc(TypeInfoFactory.getCharTypeInfo(lType.getPrecision()), + new HiveChar((String) literal.getValue3(), lType.getPrecision())); + case OTHER: + default: + return new ExprNodeConstantDesc(TypeInfoFactory.voidTypeInfo, literal.getValue3()); + } + } + +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinCondTypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinCondTypeCheckProcFactory.java new file mode 100644 index 0000000..406c18e --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinCondTypeCheckProcFactory.java @@ -0,0 +1,316 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; + +import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.exec.FunctionInfo; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.RowResolver; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.TypeCheckCtx; +import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr; + +/** + * JoinCondTypeCheckProcFactory is used by Optiq planner(CBO) to generate Join Conditions from Join Condition AST. + * Reasons for sub class: + * 1. Additional restrictions on what is supported in Join Conditions + * 2. Column handling is different + * 3. Join Condn expr has two input RR as opposed to one. + */ + +/** + * TODO:
+ * 1. Could we use a combined RR instead of a list of RRs?
+ * 2. Use Column Processing from TypeCheckProcFactory
+ * 3. Why not use GB expr ? + */ +public class JoinCondTypeCheckProcFactory extends TypeCheckProcFactory { + + public static Map genExprNode(ASTNode expr, TypeCheckCtx tcCtx) + throws SemanticException { + return TypeCheckProcFactory.genExprNode(expr, tcCtx, new JoinCondTypeCheckProcFactory()); + } + + /** + * Processor for table columns. + */ + public static class JoinCondColumnExprProcessor extends ColumnExprProcessor { + + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + Object... nodeOutputs) throws SemanticException { + + JoinTypeCheckCtx ctx = (JoinTypeCheckCtx) procCtx; + if (ctx.getError() != null) { + return null; + } + + ASTNode expr = (ASTNode) nd; + ASTNode parent = stack.size() > 1 ? (ASTNode) stack.get(stack.size() - 2) : null; + + if (expr.getType() != HiveParser.TOK_TABLE_OR_COL) { + ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr), expr); + return null; + } + + assert (expr.getChildCount() == 1); + String tableOrCol = BaseSemanticAnalyzer.unescapeIdentifier(expr.getChild(0).getText()); + + boolean qualifiedAccess = (parent != null && parent.getType() == HiveParser.DOT); + + ColumnInfo colInfo = null; + if (!qualifiedAccess) { + colInfo = getColInfo(ctx, null, tableOrCol, expr); + // It's a column. + return new ExprNodeColumnDesc(colInfo.getType(), colInfo.getInternalName(), + colInfo.getTabAlias(), colInfo.getIsVirtualCol()); + } else if (hasTableAlias(ctx, tableOrCol, expr)) { + return null; + } else { + throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(expr)); + } + } + + private static boolean hasTableAlias(JoinTypeCheckCtx ctx, String tabName, ASTNode expr) + throws SemanticException { + int tblAliasCnt = 0; + for (RowResolver rr : ctx.getInputRRList()) { + if (rr.hasTableAlias(tabName)) + tblAliasCnt++; + } + + if (tblAliasCnt > 1) { + throw new SemanticException(ErrorMsg.INVALID_JOIN_CONDITION_1.getMsg(expr)); + } + + return (tblAliasCnt == 1) ? true : false; + } + + private static ColumnInfo getColInfo(JoinTypeCheckCtx ctx, String tabName, String colAlias, + ASTNode expr) throws SemanticException { + ColumnInfo tmp; + ColumnInfo cInfoToRet = null; + + for (RowResolver rr : ctx.getInputRRList()) { + tmp = rr.get(tabName, colAlias); + if (tmp != null) { + if (cInfoToRet != null) { + throw new SemanticException(ErrorMsg.INVALID_JOIN_CONDITION_1.getMsg(expr)); + } + cInfoToRet = tmp; + } + } + + return cInfoToRet; + } + } + + /** + * Factory method to get ColumnExprProcessor. + * + * @return ColumnExprProcessor. + */ + @Override + public ColumnExprProcessor getColumnExprProcessor() { + return new JoinCondColumnExprProcessor(); + } + + /** + * The default processor for typechecking. + */ + public static class JoinCondDefaultExprProcessor extends DefaultExprProcessor { + @Override + protected List getReferenceableColumnAliases(TypeCheckCtx ctx) { + JoinTypeCheckCtx jCtx = (JoinTypeCheckCtx) ctx; + List possibleColumnNames = new ArrayList(); + for (RowResolver rr : jCtx.getInputRRList()) { + possibleColumnNames.addAll(rr.getReferenceableColumnAliases(null, -1)); + } + + return possibleColumnNames; + } + + @Override + protected ExprNodeColumnDesc processQualifiedColRef(TypeCheckCtx ctx, ASTNode expr, + Object... nodeOutputs) throws SemanticException { + String tableAlias = BaseSemanticAnalyzer.unescapeIdentifier(expr.getChild(0).getChild(0) + .getText()); + // NOTE: tableAlias must be a valid non-ambiguous table alias, + // because we've checked that in TOK_TABLE_OR_COL's process method. 
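A column reference in a join condition must be resolvable by exactly one of the two input RowResolvers, which is what the hasTableAlias and getColInfo checks in this processor enforce. The rule is easy to state in isolation; the sketch below does so with a plain Resolver stand-in (not a Hive class, just an alias plus a column-name set) and java.util collections:

import java.util.List;
import java.util.Set;

public class JoinColumnResolutionSketch {

  // Stand-in for Hive's RowResolver: an alias plus the columns it knows.
  static class Resolver {
    final String tableAlias;
    final Set<String> columns;
    Resolver(String tableAlias, Set<String> columns) {
      this.tableAlias = tableAlias;
      this.columns = columns;
    }
  }

  // Returns the index of the single input that resolves the column; zero
  // matches or more than one match is an error, mirroring the
  // INVALID_COLUMN / INVALID_JOIN_CONDITION_1 checks above.
  static int resolve(List<Resolver> inputs, String column) {
    int found = -1;
    for (int i = 0; i < inputs.size(); i++) {
      if (inputs.get(i).columns.contains(column)) {
        if (found >= 0) {
          throw new IllegalStateException("Ambiguous column: " + column);
        }
        found = i;
      }
    }
    if (found < 0) {
      throw new IllegalStateException("Invalid column: " + column);
    }
    return found;
  }

  public static void main(String[] args) {
    List<Resolver> inputs = List.of(
        new Resolver("t1", Set.of("key", "c1")),
        new Resolver("t2", Set.of("key", "c2")));
    System.out.println(resolve(inputs, "c1")); // 0: only t1 knows c1
    try {
      resolve(inputs, "key");                  // both sides know key
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage());      // Ambiguous column: key
    }
  }
}

If neither side resolves the name, the condition is rejected outright; if both do, the failure corresponds to INVALID_JOIN_CONDITION_1 above.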
+ ColumnInfo colInfo = getColInfo((JoinTypeCheckCtx) ctx, tableAlias, + ((ExprNodeConstantDesc) nodeOutputs[1]).getValue().toString(), expr); + + if (colInfo == null) { + ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr.getChild(1)), expr); + return null; + } + return new ExprNodeColumnDesc(colInfo.getType(), colInfo.getInternalName(), tableAlias, + colInfo.getIsVirtualCol()); + } + + private static ColumnInfo getColInfo(JoinTypeCheckCtx ctx, String tabName, String colAlias, + ASTNode expr) throws SemanticException { + ColumnInfo tmp; + ColumnInfo cInfoToRet = null; + + for (RowResolver rr : ctx.getInputRRList()) { + tmp = rr.get(tabName, colAlias); + if (tmp != null) { + if (cInfoToRet != null) { + throw new SemanticException(ErrorMsg.INVALID_JOIN_CONDITION_1.getMsg(expr)); + } + cInfoToRet = tmp; + } + } + + return cInfoToRet; + } + + @Override + protected void validateUDF(ASTNode expr, boolean isFunction, TypeCheckCtx ctx, FunctionInfo fi, + List children, GenericUDF genericUDF) throws SemanticException { + super.validateUDF(expr, isFunction, ctx, fi, children, genericUDF); + + JoinTypeCheckCtx jCtx = (JoinTypeCheckCtx) ctx; + + // Join Condition can not contain disjunctions + if (genericUDF instanceof GenericUDFOPOr) { + throw new SemanticException(ErrorMsg.INVALID_JOIN_CONDITION_3.getMsg(expr)); + } + + // Non Conjunctive elements have further limitations in Join conditions + if (!(genericUDF instanceof GenericUDFOPAnd)) { + // Non Comparison UDF other than 'and' can not use inputs from both side + if (!(genericUDF instanceof GenericUDFBaseCompare)) { + if (genericUDFargsRefersToBothInput(genericUDF, children, jCtx.getInputRRList())) { + throw new SemanticException(ErrorMsg.INVALID_JOIN_CONDITION_1.getMsg(expr)); + } + } else if (genericUDF instanceof GenericUDFBaseCompare) { + // Comparisons of non literals LHS/RHS can not refer to inputs from + // both sides + if (children.size() == 2 && !(children.get(0) instanceof ExprNodeConstantDesc) + && !(children.get(1) instanceof ExprNodeConstantDesc)) { + if (comparisonUDFargsRefersToBothInput((GenericUDFBaseCompare) genericUDF, children, + jCtx.getInputRRList())) { + throw new SemanticException(ErrorMsg.INVALID_JOIN_CONDITION_1.getMsg(expr)); + } + } + } + } + } + + private static boolean genericUDFargsRefersToBothInput(GenericUDF udf, + List children, List inputRRList) { + boolean argsRefersToBothInput = false; + + Map hasCodeToColDescMap = new HashMap(); + for (ExprNodeDesc child : children) { + ExprNodeDescUtils.getExprNodeColumnDesc(child, hasCodeToColDescMap); + } + Set inputRef = getInputRef(hasCodeToColDescMap.values(), inputRRList); + + if (inputRef.size() > 1) + argsRefersToBothInput = true; + + return argsRefersToBothInput; + } + + private static boolean comparisonUDFargsRefersToBothInput(GenericUDFBaseCompare comparisonUDF, + List children, List inputRRList) { + boolean argsRefersToBothInput = false; + + Map lhsHashCodeToColDescMap = new HashMap(); + Map rhsHashCodeToColDescMap = new HashMap(); + ExprNodeDescUtils.getExprNodeColumnDesc(children.get(0), lhsHashCodeToColDescMap); + ExprNodeDescUtils.getExprNodeColumnDesc(children.get(1), rhsHashCodeToColDescMap); + Set lhsInputRef = getInputRef(lhsHashCodeToColDescMap.values(), inputRRList); + Set rhsInputRef = getInputRef(rhsHashCodeToColDescMap.values(), inputRRList); + + if (lhsInputRef.size() > 1 || rhsInputRef.size() > 1) + argsRefersToBothInput = true; + + return argsRefersToBothInput; + } + + private static Set getInputRef(Collection colDescSet, + List inputRRList) { + 
String tableAlias; + RowResolver inputRR; + Set inputLineage = new HashSet(); + + for (ExprNodeDesc col : colDescSet) { + ExprNodeColumnDesc colDesc = (ExprNodeColumnDesc) col; + tableAlias = colDesc.getTabAlias(); + + for (int i = 0; i < inputRRList.size(); i++) { + inputRR = inputRRList.get(i); + + // If table Alias is present check if InputRR has that table and then + // check for internal name + // else if table alias is null then check with internal name in all + // inputRR. + if (tableAlias != null) { + if (inputRR.hasTableAlias(tableAlias)) { + if (inputRR.getInvRslvMap().containsKey(colDesc.getColumn())) { + inputLineage.add(i); + } + } + } else { + if (inputRR.getInvRslvMap().containsKey(colDesc.getColumn())) { + inputLineage.add(i); + } + } + } + } + + return inputLineage; + } + } + + /** + * Factory method to get DefaultExprProcessor. + * + * @return DefaultExprProcessor. + */ + @Override + public DefaultExprProcessor getDefaultExprProcessor() { + return new JoinCondDefaultExprProcessor(); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinTypeCheckCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinTypeCheckCtx.java new file mode 100644 index 0000000..fdee66b --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/JoinTypeCheckCtx.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.util.List; + +import org.apache.hadoop.hive.ql.parse.JoinType; +import org.apache.hadoop.hive.ql.parse.RowResolver; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.parse.TypeCheckCtx; + +import com.google.common.collect.ImmutableList; + +/** + * JoinTypeCheckCtx is used by Optiq planner(CBO) to generate Join Conditions from Join Condition AST. + * Reasons for sub class: + * 1. Join Conditions can not handle: + * a. Stateful Functions + * b. Distinct + * c. '*' expr + * d. '.*' expr + * e. Windowing expr + * f. Complex type member access + * g. Array Index Access + * h. Sub query + * i. GB expr elimination + * 2. Join Condn expr has two input RR as opposed to one. + */ + +/** + * TODO:
+ * 1. Could we use a combined RR instead of a list of RRs?
+ * 2. Why not use GB expr ? + */ +public class JoinTypeCheckCtx extends TypeCheckCtx { + private final ImmutableList inputRRLst; + private final boolean outerJoin; + + public JoinTypeCheckCtx(RowResolver leftRR, RowResolver rightRR, JoinType hiveJoinType) + throws SemanticException { + super(RowResolver.getCombinedRR(leftRR, rightRR), false, false, false, false, false, false, + false, false); + this.inputRRLst = ImmutableList.of(leftRR, rightRR); + this.outerJoin = (hiveJoinType == JoinType.LEFTOUTER) || (hiveJoinType == JoinType.RIGHTOUTER) + || (hiveJoinType == JoinType.FULLOUTER); + } + + /** + * @return the inputRR List + */ + public List getInputRRList() { + return inputRRLst; + } + + public boolean isOuterJoin() { + return outerJoin; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/PlanModifierForASTConv.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/PlanModifierForASTConv.java new file mode 100644 index 0000000..3d90ae7 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/PlanModifierForASTConv.java @@ -0,0 +1,313 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil; +import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveAggregateRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveSortRel; +import org.eigenbase.rel.AggregateRelBase; +import org.eigenbase.rel.EmptyRel; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.OneRowRelBase; +import org.eigenbase.rel.ProjectRelBase; +import org.eigenbase.rel.RelCollationImpl; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.SetOpRel; +import org.eigenbase.rel.SingleRel; +import org.eigenbase.rel.SortRel; +import org.eigenbase.rel.rules.MultiJoinRel; +import org.eigenbase.relopt.hep.HepRelVertex; +import org.eigenbase.relopt.volcano.RelSubset; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.rex.RexNode; +import org.eigenbase.util.Pair; + +import com.google.common.collect.ImmutableMap; + +public class PlanModifierForASTConv { + + public static RelNode convertOpTree(RelNode rel, List resultSchema) + throws OptiqSemanticException { + RelNode newTopNode = rel; + + if (!(newTopNode instanceof ProjectRelBase) && !(newTopNode instanceof SortRel)) { + newTopNode = introduceDerivedTable(newTopNode); + } + + convertOpTree(newTopNode, (RelNode) null); + + Pair topSelparentPair = HiveOptiqUtil.getTopLevelSelect(newTopNode); + fixTopOBSchema(newTopNode, topSelparentPair, resultSchema); + topSelparentPair = HiveOptiqUtil.getTopLevelSelect(newTopNode); + newTopNode = renameTopLevelSelectInResultSchema(newTopNode, topSelparentPair, resultSchema); + + return newTopNode; + } + + private static void convertOpTree(RelNode rel, RelNode parent) { + + if (rel instanceof EmptyRel) { + throw new RuntimeException("Found Empty Rel"); + } else if (rel instanceof HepRelVertex) { + throw new RuntimeException("Found HepRelVertex"); + } else if (rel instanceof JoinRelBase) { + if (!validJoinParent(rel, parent)) { + introduceDerivedTable(rel, parent); + } + } else if (rel instanceof MultiJoinRel) { + throw new RuntimeException("Found MultiJoinRel"); + } else if (rel instanceof OneRowRelBase) { + throw new RuntimeException("Found OneRowRelBase"); + } else if (rel instanceof RelSubset) { + throw new RuntimeException("Found RelSubset"); + } else if (rel instanceof SetOpRel) { + // TODO: Handle more than 2 inputs for setop + if (!validSetopParent(rel, parent)) + introduceDerivedTable(rel, parent); + + SetOpRel setopRel = (SetOpRel) rel; + for (RelNode inputRel : setopRel.getInputs()) { + if (!validSetopChild(inputRel)) { + introduceDerivedTable(inputRel, setopRel); + } + } + } else if (rel instanceof SingleRel) { + if (rel instanceof FilterRelBase) { + if (!validFilterParent(rel, parent)) { + introduceDerivedTable(rel, parent); + } + } else if (rel instanceof HiveSortRel) { + if (!validSortParent(rel, parent)) { + introduceDerivedTable(rel, parent); + } + if (!validSortChild((HiveSortRel) rel)) { + introduceDerivedTable(((HiveSortRel) rel).getChild(), rel); + } + } else if (rel instanceof HiveAggregateRel) { + if (!validGBParent(rel, parent)) { + introduceDerivedTable(rel, parent); + } + } + 
} + + List childNodes = rel.getInputs(); + if (childNodes != null) { + for (RelNode r : childNodes) { + convertOpTree(r, rel); + } + } + } + + private static void fixTopOBSchema(final RelNode rootRel, + Pair topSelparentPair, List resultSchema) + throws OptiqSemanticException { + if (topSelparentPair.getKey() instanceof SortRel + && HiveOptiqUtil.orderRelNode(topSelparentPair.getKey())) { + HiveSortRel obRel = (HiveSortRel) topSelparentPair.getKey(); + ProjectRelBase obChild = (ProjectRelBase) topSelparentPair.getValue(); + + if (obChild.getRowType().getFieldCount() > resultSchema.size()) { + RelDataType rt = obChild.getRowType(); + Set collationInputRefs = new HashSet(RelCollationImpl.ordinals(obRel + .getCollation())); + ImmutableMap.Builder inputRefToCallMapBldr = ImmutableMap.builder(); + for (int i = resultSchema.size(); i < rt.getFieldCount(); i++) { + if (collationInputRefs.contains(i)) { + inputRefToCallMapBldr.put(i, obChild.getChildExps().get(i)); + } + } + + ImmutableMap inputRefToCallMap = inputRefToCallMapBldr.build(); + if ((obChild.getRowType().getFieldCount() - inputRefToCallMap.size()) == resultSchema + .size()) { + HiveProjectRel replacementProjectRel = HiveProjectRel.create(obChild.getChild(), obChild + .getChildExps().subList(0, resultSchema.size()), obChild.getRowType().getFieldNames() + .subList(0, resultSchema.size())); + obRel.replaceInput(0, replacementProjectRel); + obRel.setInputRefToCallMap(inputRefToCallMap); + } else { + throw new OptiqSemanticException( + "Result Schema didn't match Optiq Optimized Op Tree Schema"); + } + } + } + } + + private static RelNode renameTopLevelSelectInResultSchema(final RelNode rootRel, + Pair topSelparentPair, List resultSchema) + throws OptiqSemanticException { + RelNode parentOforiginalProjRel = topSelparentPair.getKey(); + HiveProjectRel originalProjRel = (HiveProjectRel) topSelparentPair.getValue(); + + // Assumption: top portion of tree could only be + // (limit)?(OB)?(ProjectRelBase).... + List rootChildExps = originalProjRel.getChildExps(); + if (resultSchema.size() != rootChildExps.size()) { + // this is a bug in Hive where for queries like select key,value,value + // convertRowSchemaToResultSetSchema() only returns schema containing + // key,value + // Underlying issue is much deeper because it seems like RowResolver + // itself doesnt have + // those mappings. see limit_pushdown.q & limit_pushdown_negative.q + // Till Hive issue is fixed, disable CBO for such queries. 
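The renaming step just below strips the leading underscore from generated result-schema names (e.g. "_c0") before applying them as the top-level select aliases. As a self-contained illustration of that loop (the class and method names here are invented for the sketch):

import java.util.ArrayList;
import java.util.List;

public class AliasRenameSketch {

  // Generated names such as "_c0" lose their leading underscore; ordinary
  // column names pass through unchanged.
  static List<String> cleanAliases(List<String> resultSchemaNames) {
    List<String> out = new ArrayList<>();
    for (String name : resultSchemaNames) {
      out.add(name.startsWith("_") ? name.substring(1) : name);
    }
    return out;
  }

  public static void main(String[] args) {
    System.out.println(cleanAliases(List.of("_c0", "key", "_c2"))); // [c0, key, c2]
  }
}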
+ throw new OptiqSemanticException("Result Schema didn't match Optiq Optimized Op Tree Schema"); + } + + List newSelAliases = new ArrayList(); + String colAlias; + for (int i = 0; i < rootChildExps.size(); i++) { + colAlias = resultSchema.get(i).getName(); + if (colAlias.startsWith("_")) + colAlias = colAlias.substring(1); + newSelAliases.add(colAlias); + } + + HiveProjectRel replacementProjectRel = HiveProjectRel.create(originalProjRel.getChild(), + originalProjRel.getChildExps(), newSelAliases); + + if (rootRel == originalProjRel) { + return replacementProjectRel; + } else { + parentOforiginalProjRel.replaceInput(0, replacementProjectRel); + return rootRel; + } + } + + private static RelNode introduceDerivedTable(final RelNode rel) { + List projectList = HiveOptiqUtil.getProjsFromBelowAsInputRef(rel); + + HiveProjectRel select = HiveProjectRel.create(rel.getCluster(), rel, projectList, + rel.getRowType(), rel.getCollationList()); + + return select; + } + + private static void introduceDerivedTable(final RelNode rel, RelNode parent) { + int i = 0; + int pos = -1; + List childList = parent.getInputs(); + + for (RelNode child : childList) { + if (child == rel) { + pos = i; + break; + } + i++; + } + + if (pos == -1) { + throw new RuntimeException("Couldn't find child node in parent's inputs"); + } + + RelNode select = introduceDerivedTable(rel); + + parent.replaceInput(pos, select); + } + + private static boolean validJoinParent(RelNode joinNode, RelNode parent) { + boolean validParent = true; + + if (parent instanceof JoinRelBase) { + if (((JoinRelBase) parent).getRight() == joinNode) { + validParent = false; + } + } else if (parent instanceof SetOpRel) { + validParent = false; + } + + return validParent; + } + + private static boolean validFilterParent(RelNode filterNode, RelNode parent) { + boolean validParent = true; + + // TOODO: Verify GB having is not a seperate filter (if so we shouldn't + // introduce derived table) + if (parent instanceof FilterRelBase || parent instanceof JoinRelBase + || parent instanceof SetOpRel) { + validParent = false; + } + + return validParent; + } + + private static boolean validGBParent(RelNode gbNode, RelNode parent) { + boolean validParent = true; + + // TOODO: Verify GB having is not a seperate filter (if so we shouldn't + // introduce derived table) + if (parent instanceof JoinRelBase || parent instanceof SetOpRel + || parent instanceof AggregateRelBase + || (parent instanceof FilterRelBase && ((AggregateRelBase) gbNode).getGroupSet().isEmpty())) { + validParent = false; + } + + return validParent; + } + + private static boolean validSortParent(RelNode sortNode, RelNode parent) { + boolean validParent = true; + + if (parent != null && !(parent instanceof ProjectRelBase) + && !((parent instanceof SortRel) || HiveOptiqUtil.orderRelNode(parent))) + validParent = false; + + return validParent; + } + + private static boolean validSortChild(HiveSortRel sortNode) { + boolean validChild = true; + RelNode child = sortNode.getChild(); + + if (!(HiveOptiqUtil.limitRelNode(sortNode) && HiveOptiqUtil.orderRelNode(child)) + && !(child instanceof ProjectRelBase)) { + validChild = false; + } + + return validChild; + } + + private static boolean validSetopParent(RelNode setop, RelNode parent) { + boolean validChild = true; + + if (parent != null && !(parent instanceof ProjectRelBase)) { + validChild = false; + } + + return validChild; + } + + private static boolean validSetopChild(RelNode setopChild) { + boolean validChild = true; + + if (!(setopChild instanceof 
ProjectRelBase)) { + validChild = false; + } + + return validChild; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java new file mode 100644 index 0000000..ec85603 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/RexNodeConverter.java @@ -0,0 +1,419 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.GregorianCalendar; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import net.hydromatic.avatica.ByteString; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.common.type.Decimal128; +import org.apache.hadoop.hive.common.type.HiveChar; +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.common.type.HiveVarchar; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException; +import org.apache.hadoop.hive.ql.parse.ParseUtils; +import org.apache.hadoop.hive.ql.parse.RowResolver; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseCompare; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBaseNumeric; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToBinary; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToChar; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDate; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDecimal; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToVarchar; +import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import 
org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.eigenbase.rel.RelNode; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeFactory; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexCall; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexUtil; +import org.eigenbase.sql.SqlOperator; +import org.eigenbase.sql.fun.SqlCastFunction; +import org.eigenbase.sql.type.SqlTypeName; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableList.Builder; +import com.google.common.collect.ImmutableMap; + +public class RexNodeConverter { + private static final Log LOG = LogFactory.getLog(RexNodeConverter.class); + + private static class InputCtx { + private final RelDataType m_optiqInpDataType; + private final ImmutableMap m_hiveNameToPosMap; + private final RowResolver m_hiveRR; + private final int m_offsetInOptiqSchema; + + private InputCtx(RelDataType optiqInpDataType, ImmutableMap hiveNameToPosMap, + RowResolver hiveRR, int offsetInOptiqSchema) { + m_optiqInpDataType = optiqInpDataType; + m_hiveNameToPosMap = hiveNameToPosMap; + m_hiveRR = hiveRR; + m_offsetInOptiqSchema = offsetInOptiqSchema; + } + }; + + private final RelOptCluster m_cluster; + private final ImmutableList m_inputCtxs; + private final boolean m_flattenExpr; + + public RexNodeConverter(RelOptCluster cluster, RelDataType inpDataType, + ImmutableMap nameToPosMap, int offset, boolean flattenExpr) { + this.m_cluster = cluster; + m_inputCtxs = ImmutableList.of(new InputCtx(inpDataType, nameToPosMap, null, offset)); + m_flattenExpr = flattenExpr; + } + + public RexNodeConverter(RelOptCluster cluster, List inpCtxLst, boolean flattenExpr) { + this.m_cluster = cluster; + m_inputCtxs = ImmutableList. builder().addAll(inpCtxLst).build(); + m_flattenExpr = flattenExpr; + } + + public RexNode convert(ExprNodeDesc expr) throws SemanticException { + if (expr instanceof ExprNodeNullDesc) { + return createNullLiteral(expr); + } else if (expr instanceof ExprNodeGenericFuncDesc) { + return convert((ExprNodeGenericFuncDesc) expr); + } else if (expr instanceof ExprNodeConstantDesc) { + return convert((ExprNodeConstantDesc) expr); + } else if (expr instanceof ExprNodeColumnDesc) { + return convert((ExprNodeColumnDesc) expr); + } else if (expr instanceof ExprNodeFieldDesc) { + return convert((ExprNodeFieldDesc) expr); + } else { + throw new RuntimeException("Unsupported Expression"); + } + // TODO: handle ExprNodeColumnListDesc + } + + private RexNode convert(final ExprNodeFieldDesc fieldDesc) throws SemanticException { + RexNode rexNode = convert(fieldDesc.getDesc()); + if (rexNode instanceof RexCall) { + // regular case of accessing nested field in a column + return m_cluster.getRexBuilder().makeFieldAccess(rexNode, fieldDesc.getFieldName(), true); + } else { + // This may happen for schema-less tables, where columns are dynamically + // supplied by serdes. + throw new OptiqSemanticException("Unexpected rexnode : " + + rexNode.getClass().getCanonicalName()); + } + } + + private RexNode convert(final ExprNodeGenericFuncDesc func) throws SemanticException { + ExprNodeDesc tmpExprNode; + RexNode tmpRN; + + List childRexNodeLst = new LinkedList(); + Builder argTypeBldr = ImmutableList. builder(); + + // TODO: 1) Expand to other functions as needed 2) What about types other than primitive. 
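The loop that follows decides, per child, whether a conversion cast is needed: comparisons coerce both children to one common type, while numeric ops give each child only the minimum cast it needs (via deriveMinArgumentCast). A rough standalone sketch of that decision, assuming a simplified widening order in place of FunctionRegistry's real coercion rules:

import java.util.Arrays;
import java.util.List;

public class CastDecisionSketch {

  // Simplifying assumption: a single linear widening order, not Hive's
  // actual coercion table.
  static final List<String> WIDENING =
      Arrays.asList("tinyint", "smallint", "int", "bigint", "float", "double");

  // Comparisons cast both sides to one common type.
  static String commonComparisonType(String lhs, String rhs) {
    int l = WIDENING.indexOf(lhs), r = WIDENING.indexOf(rhs);
    if (l < 0 || r < 0) return "string"; // non-numeric fallback (assumption)
    return WIDENING.get(Math.max(l, r));
  }

  // Numeric ops cast a child only as far as that child needs
  // (cf. deriveMinArgumentCast), never blindly to the result type.
  static String minArgumentCast(String childType, String resultType) {
    int c = WIDENING.indexOf(childType), t = WIDENING.indexOf(resultType);
    return (c >= 0 && t >= 0 && c < t) ? WIDENING.get(t) : childType;
  }

  public static void main(String[] args) {
    System.out.println(commonComparisonType("int", "bigint")); // bigint
    System.out.println(minArgumentCast("smallint", "int"));    // int
    System.out.println(minArgumentCast("bigint", "bigint"));   // bigint
  }
}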
+ TypeInfo tgtDT = null; + GenericUDF tgtUdf = func.getGenericUDF(); + boolean isNumeric = tgtUdf instanceof GenericUDFBaseNumeric, + isCompare = !isNumeric && tgtUdf instanceof GenericUDFBaseCompare; + if (isNumeric) { + tgtDT = func.getTypeInfo(); + + assert func.getChildren().size() == 2; + // TODO: checking 2 children is useless, compare already does that. + } else if (isCompare && (func.getChildren().size() == 2)) { + tgtDT = FunctionRegistry.getCommonClassForComparison(func.getChildren().get(0) + .getTypeInfo(), func.getChildren().get(1).getTypeInfo()); + } + + + for (ExprNodeDesc childExpr : func.getChildren()) { + tmpExprNode = childExpr; + if (tgtDT != null + && TypeInfoUtils.isConversionRequiredForComparison(tgtDT, childExpr.getTypeInfo())) { + if (isCompare) { + // For compare, we will convert requisite children + tmpExprNode = ParseUtils.createConversionCast(childExpr, (PrimitiveTypeInfo) tgtDT); + } else if (isNumeric) { + // For numeric, we'll do minimum necessary cast - if we cast to the type + // of expression, bad things will happen. + GenericUDFBaseNumeric numericUdf = (GenericUDFBaseNumeric)tgtUdf; + PrimitiveTypeInfo minArgType = numericUdf.deriveMinArgumentCast(childExpr, tgtDT); + tmpExprNode = ParseUtils.createConversionCast(childExpr, minArgType); + } else { + throw new AssertionError("Unexpected " + tgtDT + " - not a numeric op or compare"); + } + + } + argTypeBldr.add(TypeConverter.convert(tmpExprNode.getTypeInfo(), m_cluster.getTypeFactory())); + tmpRN = convert(tmpExprNode); + childRexNodeLst.add(tmpRN); + } + + // See if this is an explicit cast. + RexNode expr = null; + RelDataType retType = null; + expr = handleExplicitCast(func, childRexNodeLst); + + if (expr == null) { + // This is not a cast; process the function. + retType = TypeConverter.convert(func.getTypeInfo(), m_cluster.getTypeFactory()); + SqlOperator optiqOp = SqlFunctionConverter.getOptiqOperator(func.getFuncText(), + func.getGenericUDF(), argTypeBldr.build(), retType); + expr = m_cluster.getRexBuilder().makeCall(optiqOp, childRexNodeLst); + } else { + retType = expr.getType(); + } + + // TODO: Cast Function in Optiq have a bug where it infertype on cast throws + // an exception + if (m_flattenExpr && (expr instanceof RexCall) + && !(((RexCall) expr).getOperator() instanceof SqlCastFunction)) { + RexCall call = (RexCall) expr; + expr = m_cluster.getRexBuilder().makeCall(retType, call.getOperator(), + RexUtil.flatten(call.getOperands(), call.getOperator())); + } + + return expr; + } + + private boolean castExprUsingUDFBridge(GenericUDF gUDF) { + boolean castExpr = false; + if (gUDF != null && gUDF instanceof GenericUDFBridge) { + String udfClassName = ((GenericUDFBridge) gUDF).getUdfClassName(); + if (udfClassName != null) { + int sp = udfClassName.lastIndexOf('.'); + // TODO: add method to UDFBridge to say if it is a cast func + if (sp >= 0 & (sp + 1) < udfClassName.length()) { + udfClassName = udfClassName.substring(sp + 1); + if (udfClassName.equals("UDFToBoolean") || udfClassName.equals("UDFToByte") + || udfClassName.equals("UDFToDouble") || udfClassName.equals("UDFToInteger") + || udfClassName.equals("UDFToLong") || udfClassName.equals("UDFToShort") + || udfClassName.equals("UDFToFloat") || udfClassName.equals("UDFToString")) + castExpr = true; + } + } + } + + return castExpr; + } + + private RexNode handleExplicitCast(ExprNodeGenericFuncDesc func, List childRexNodeLst) + throws OptiqSemanticException { + RexNode castExpr = null; + + if (childRexNodeLst != null && childRexNodeLst.size() == 1) 
{ + GenericUDF udf = func.getGenericUDF(); + if ((udf instanceof GenericUDFToChar) || (udf instanceof GenericUDFToVarchar) + || (udf instanceof GenericUDFToDecimal) || (udf instanceof GenericUDFToDate) + || (udf instanceof GenericUDFToBinary) || castExprUsingUDFBridge(udf)) { + castExpr = m_cluster.getRexBuilder().makeAbstractCast( + TypeConverter.convert(func.getTypeInfo(), m_cluster.getTypeFactory()), + childRexNodeLst.get(0)); + } + } + + return castExpr; + } + + private InputCtx getInputCtx(ExprNodeColumnDesc col) throws SemanticException { + InputCtx ctxLookingFor = null; + + if (m_inputCtxs.size() == 1) { + ctxLookingFor = m_inputCtxs.get(0); + } else { + String tableAlias = col.getTabAlias(); + String colAlias = col.getColumn(); + int noInp = 0; + for (InputCtx ic : m_inputCtxs) { + if (tableAlias == null || ic.m_hiveRR.hasTableAlias(tableAlias)) { + if (ic.m_hiveRR.getPosition(colAlias) >= 0) { + ctxLookingFor = ic; + noInp++; + } + } + } + + if (noInp > 1) + throw new RuntimeException("Ambigous column mapping"); + } + + return ctxLookingFor; + } + + protected RexNode convert(ExprNodeColumnDesc col) throws SemanticException { + InputCtx ic = getInputCtx(col); + int pos = ic.m_hiveNameToPosMap.get(col.getColumn()); + return m_cluster.getRexBuilder().makeInputRef( + ic.m_optiqInpDataType.getFieldList().get(pos).getType(), pos + ic.m_offsetInOptiqSchema); + } + + private static final BigInteger MIN_LONG_BI = BigInteger.valueOf(Long.MIN_VALUE), + MAX_LONG_BI = BigInteger.valueOf(Long.MAX_VALUE); + + protected RexNode convert(ExprNodeConstantDesc literal) throws OptiqSemanticException { + RexBuilder rexBuilder = m_cluster.getRexBuilder(); + RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory(); + PrimitiveTypeInfo hiveType = (PrimitiveTypeInfo) literal.getTypeInfo(); + RelDataType optiqDataType = TypeConverter.convert(hiveType, dtFactory); + + PrimitiveCategory hiveTypeCategory = hiveType.getPrimitiveCategory(); + + ConstantObjectInspector coi = literal.getWritableObjectInspector(); + Object value = ObjectInspectorUtils.copyToStandardJavaObject(coi.getWritableConstantValue(), + coi); + + RexNode optiqLiteral = null; + // TODO: Verify if we need to use ConstantObjectInspector to unwrap data + switch (hiveTypeCategory) { + case BOOLEAN: + optiqLiteral = rexBuilder.makeLiteral(((Boolean) value).booleanValue()); + break; + case BYTE: + byte[] byteArray = new byte[] { (Byte) value }; + ByteString bs = new ByteString(byteArray); + optiqLiteral = rexBuilder.makeBinaryLiteral(bs); + break; + case SHORT: + optiqLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Short) value)); + break; + case INT: + optiqLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Integer) value)); + break; + case LONG: + optiqLiteral = rexBuilder.makeBigintLiteral(new BigDecimal((Long) value)); + break; + // TODO: is Decimal an exact numeric or approximate numeric? + case DECIMAL: + if (value instanceof HiveDecimal) { + value = ((HiveDecimal) value).bigDecimalValue(); + } else if (value instanceof Decimal128) { + value = ((Decimal128) value).toBigDecimal(); + } + if (value == null) { + // We have found an invalid decimal value while enforcing precision and + // scale. Ideally, + // we would replace it with null here, which is what Hive does. However, + // we need to plumb + // this thru up somehow, because otherwise having different expression + // type in AST causes + // the plan generation to fail after CBO, probably due to some residual + // state in SA/QB. 
+ // For now, we will not run CBO in the presence of invalid decimal + // literals. + throw new OptiqSemanticException("Expression " + literal.getExprString() + + " is not a valid decimal"); + // TODO: return createNullLiteral(literal); + } + BigDecimal bd = (BigDecimal) value; + BigInteger unscaled = bd.unscaledValue(); + if (unscaled.compareTo(MIN_LONG_BI) >= 0 && unscaled.compareTo(MAX_LONG_BI) <= 0) { + optiqLiteral = rexBuilder.makeExactLiteral(bd); + } else { + // CBO doesn't support unlimited precision decimals. In practice, this + // will work... + // An alternative would be to throw CboSemanticException and fall back + // to no CBO. + RelDataType relType = m_cluster.getTypeFactory().createSqlType(SqlTypeName.DECIMAL, + bd.scale(), unscaled.toString().length()); + optiqLiteral = rexBuilder.makeExactLiteral(bd, relType); + } + break; + case FLOAT: + optiqLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Float) value), optiqDataType); + break; + case DOUBLE: + optiqLiteral = rexBuilder.makeApproxLiteral(new BigDecimal((Double) value), optiqDataType); + break; + case CHAR: + if (value instanceof HiveChar) + value = ((HiveChar) value).getValue(); + optiqLiteral = rexBuilder.makeLiteral((String) value); + break; + case VARCHAR: + if (value instanceof HiveVarchar) + value = ((HiveVarchar) value).getValue(); + optiqLiteral = rexBuilder.makeLiteral((String) value); + break; + case STRING: + optiqLiteral = rexBuilder.makeLiteral((String) value); + break; + case DATE: + Calendar cal = new GregorianCalendar(); + cal.setTime((Date) value); + optiqLiteral = rexBuilder.makeDateLiteral(cal); + break; + case TIMESTAMP: + optiqLiteral = rexBuilder.makeTimestampLiteral((Calendar) value, + RelDataType.PRECISION_NOT_SPECIFIED); + break; + case BINARY: + case VOID: + case UNKNOWN: + default: + throw new RuntimeException("UnSupported Literal"); + } + + return optiqLiteral; + } + + private RexNode createNullLiteral(ExprNodeDesc expr) throws OptiqSemanticException { + return m_cluster.getRexBuilder().makeNullLiteral( + TypeConverter.convert(expr.getTypeInfo(), m_cluster.getTypeFactory()).getSqlTypeName()); + } + + public static RexNode convert(RelOptCluster cluster, ExprNodeDesc joinCondnExprNode, + List inputRels, LinkedHashMap relToHiveRR, + Map> relToHiveColNameOptiqPosMap, boolean flattenExpr) + throws SemanticException { + List inputCtxLst = new ArrayList(); + + int offSet = 0; + for (RelNode r : inputRels) { + inputCtxLst.add(new InputCtx(r.getRowType(), relToHiveColNameOptiqPosMap.get(r), relToHiveRR + .get(r), offSet)); + offSet += r.getRowType().getFieldCount(); + } + + return (new RexNodeConverter(cluster, inputCtxLst, flattenExpr)).convert(joinCondnExprNode); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java new file mode 100644 index 0000000..31f906a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java @@ -0,0 +1,384 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.lang.annotation.Annotation; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.FunctionInfo; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException; +import org.apache.hadoop.hive.ql.parse.ASTNode; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.ParseDriver; +import org.apache.hadoop.hive.ql.udf.SettableUDF; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNegative; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPositive; +import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeFactory; +import org.eigenbase.sql.SqlAggFunction; +import org.eigenbase.sql.SqlFunction; +import org.eigenbase.sql.SqlFunctionCategory; +import org.eigenbase.sql.SqlKind; +import org.eigenbase.sql.SqlOperator; +import org.eigenbase.sql.fun.SqlStdOperatorTable; +import org.eigenbase.sql.type.InferTypes; +import org.eigenbase.sql.type.OperandTypes; +import org.eigenbase.sql.type.ReturnTypes; +import org.eigenbase.sql.type.SqlOperandTypeChecker; +import org.eigenbase.sql.type.SqlOperandTypeInference; +import org.eigenbase.sql.type.SqlReturnTypeInference; +import org.eigenbase.sql.type.SqlTypeFamily; +import org.eigenbase.util.Util; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Maps; + +public class SqlFunctionConverter { + private static final Log LOG = LogFactory.getLog(SqlFunctionConverter.class); + + static final Map hiveToOptiq; + static final Map optiqToHiveToken; + static final Map reverseOperatorMap; + + static { + StaticBlockBuilder builder = new StaticBlockBuilder(); + hiveToOptiq = ImmutableMap.copyOf(builder.hiveToOptiq); + optiqToHiveToken = ImmutableMap.copyOf(builder.optiqToHiveToken); + reverseOperatorMap = ImmutableMap.copyOf(builder.reverseOperatorMap); + } + + public static SqlOperator getOptiqOperator(String funcTextName, GenericUDF hiveUDF, + ImmutableList optiqArgTypes, RelDataType retType) throws OptiqSemanticException { + // handle overloaded methods first + if (hiveUDF instanceof GenericUDFOPNegative) { + return SqlStdOperatorTable.UNARY_MINUS; + } else if (hiveUDF instanceof GenericUDFOPPositive) { + return SqlStdOperatorTable.UNARY_PLUS; + } // do 
generic lookup + String name = null; + if (StringUtils.isEmpty(funcTextName)) { + name = getName(hiveUDF); // this should probably never happen, see getName comment + LOG.warn("The function text was empty, name from annotation is " + name); + } else { + // We could just do toLowerCase here and let SA qualify it, but let's be proper... + name = FunctionRegistry.getNormalizedFunctionName(funcTextName); + } + return getOptiqFn(name, optiqArgTypes, retType); + } + + public static GenericUDF getHiveUDF(SqlOperator op, RelDataType dt) { + String name = reverseOperatorMap.get(op); + if (name == null) + name = op.getName(); + FunctionInfo hFn = name != null ? FunctionRegistry.getFunctionInfo(name) : null; + if (hFn == null) + hFn = handleExplicitCast(op, dt); + return hFn == null ? null : hFn.getGenericUDF(); + } + + private static FunctionInfo handleExplicitCast(SqlOperator op, RelDataType dt) { + FunctionInfo castUDF = null; + + if (op.kind == SqlKind.CAST) { + TypeInfo castType = TypeConverter.convert(dt); + + if (castType.equals(TypeInfoFactory.byteTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("tinyint"); + } else if (castType instanceof CharTypeInfo) { + castUDF = handleCastForParameterizedType(castType, + FunctionRegistry.getFunctionInfo("char")); + } else if (castType instanceof VarcharTypeInfo) { + castUDF = handleCastForParameterizedType(castType, + FunctionRegistry.getFunctionInfo("varchar")); + } else if (castType.equals(TypeInfoFactory.stringTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("string"); + } else if (castType.equals(TypeInfoFactory.booleanTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("boolean"); + } else if (castType.equals(TypeInfoFactory.shortTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("smallint"); + } else if (castType.equals(TypeInfoFactory.intTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("int"); + } else if (castType.equals(TypeInfoFactory.longTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("bigint"); + } else if (castType.equals(TypeInfoFactory.floatTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("float"); + } else if (castType.equals(TypeInfoFactory.doubleTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("double"); + } else if (castType.equals(TypeInfoFactory.timestampTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("timestamp"); + } else if (castType.equals(TypeInfoFactory.dateTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("datetime"); + } else if (castType instanceof DecimalTypeInfo) { + castUDF = handleCastForParameterizedType(castType, + FunctionRegistry.getFunctionInfo("decimal")); + } else if (castType.equals(TypeInfoFactory.binaryTypeInfo)) { + castUDF = FunctionRegistry.getFunctionInfo("binary"); + } else throw new IllegalStateException("Unexpected type : " + + castType.getQualifiedName()); + } + + return castUDF; + } + + private static FunctionInfo handleCastForParameterizedType(TypeInfo ti, FunctionInfo fi) { + SettableUDF udf = (SettableUDF)fi.getGenericUDF(); + try { + udf.setTypeInfo(ti); + } catch (UDFArgumentException e) { + throw new RuntimeException(e); + } + return new FunctionInfo(fi.isNative(),fi.getDisplayName(),(GenericUDF)udf); + } + + // TODO: 1) handle Agg Func Name translation 2) is it correct to add func args + // as child of func? 
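The two buildAST overloads that follow differ in shape: the plain one emits a single function node, while the indexed one re-nests a flattened associative call, so or[x, y, z] becomes or(x, or(y, z)). A toy version that folds strings instead of ASTNodes makes the recursion visible (everything here is invented for illustration):

import java.util.List;

public class FlattenedAstSketch {

  // Right-fold over the flattened children, matching buildAST(op, children, i):
  // each step nests the current child over the fold of the remainder.
  static String fold(String op, List<String> children, int i) {
    if (i + 1 < children.size()) {
      return op + "(" + children.get(i) + ", " + fold(op, children, i + 1) + ")";
    }
    return children.get(i);
  }

  public static void main(String[] args) {
    System.out.println(fold("or", List.of("x", "y", "z"), 0)); // or(x, or(y, z))
  }
}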
+ public static ASTNode buildAST(SqlOperator op, List children) { + HiveToken hToken = optiqToHiveToken.get(op); + ASTNode node; + if (hToken != null) { + node = (ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text); + } else { + node = (ASTNode) ParseDriver.adaptor.create(HiveParser.TOK_FUNCTION, "TOK_FUNCTION"); + if (op.kind != SqlKind.CAST) { + if (op.kind == SqlKind.MINUS_PREFIX) { + node = (ASTNode) ParseDriver.adaptor.create(HiveParser.MINUS, "MINUS"); + } else if (op.kind == SqlKind.PLUS_PREFIX) { + node = (ASTNode) ParseDriver.adaptor.create(HiveParser.PLUS, "PLUS"); + } else { + if (op.getName().toUpperCase() + .equals(SqlStdOperatorTable.COUNT.getName()) + && children.size() == 0) { + node = (ASTNode) ParseDriver.adaptor.create( + HiveParser.TOK_FUNCTIONSTAR, "TOK_FUNCTIONSTAR"); + } + node.addChild((ASTNode) ParseDriver.adaptor.create(HiveParser.Identifier, op.getName())); + } + } + } + + for (ASTNode c : children) { + ParseDriver.adaptor.addChild(node, c); + } + return node; + } + + /** + * Build AST for flattened Associative expressions ('and', 'or'). Flattened + * expressions is of the form or[x,y,z] which is originally represented as + * "or[x, or[y, z]]". + */ + public static ASTNode buildAST(SqlOperator op, List children, int i) { + if (i + 1 < children.size()) { + HiveToken hToken = optiqToHiveToken.get(op); + ASTNode curNode = ((ASTNode) ParseDriver.adaptor.create(hToken.type, hToken.text)); + ParseDriver.adaptor.addChild(curNode, children.get(i)); + ParseDriver.adaptor.addChild(curNode, buildAST(op, children, i + 1)); + return curNode; + } else { + return children.get(i); + } + + } + + // TODO: this is not valid. Function names for built-in UDFs are specified in FunctionRegistry, + // and only happen to match annotations. For user UDFs, the name is what user specifies at + // creation time (annotation can be absent, different, or duplicate some other function). + private static String getName(GenericUDF hiveUDF) { + String udfName = null; + if (hiveUDF instanceof GenericUDFBridge) { + udfName = ((GenericUDFBridge) hiveUDF).getUdfName(); + } else { + Class udfClass = hiveUDF.getClass(); + Annotation udfAnnotation = udfClass.getAnnotation(Description.class); + + if (udfAnnotation != null && udfAnnotation instanceof Description) { + Description udfDescription = (Description) udfAnnotation; + udfName = udfDescription.name(); + if (udfName != null) { + String[] aliases = udfName.split(","); + if (aliases.length > 0) + udfName = aliases[0]; + } + } + + if (udfName == null || udfName.isEmpty()) { + udfName = hiveUDF.getClass().getName(); + int indx = udfName.lastIndexOf("."); + if (indx >= 0) { + indx += 1; + udfName = udfName.substring(indx); + } + } + } + + return udfName; + } + + /** This class is used to build immutable hashmaps in the static block above. 
*/ + private static class StaticBlockBuilder { + final Map hiveToOptiq = Maps.newHashMap(); + final Map optiqToHiveToken = Maps.newHashMap(); + final Map reverseOperatorMap = Maps.newHashMap(); + + StaticBlockBuilder() { + registerFunction("+", SqlStdOperatorTable.PLUS, hToken(HiveParser.PLUS, "+")); + registerFunction("-", SqlStdOperatorTable.MINUS, hToken(HiveParser.MINUS, "-")); + registerFunction("*", SqlStdOperatorTable.MULTIPLY, hToken(HiveParser.STAR, "*")); + registerFunction("/", SqlStdOperatorTable.DIVIDE, hToken(HiveParser.STAR, "/")); + registerFunction("%", SqlStdOperatorTable.MOD, hToken(HiveParser.STAR, "%")); + registerFunction("and", SqlStdOperatorTable.AND, hToken(HiveParser.KW_AND, "and")); + registerFunction("or", SqlStdOperatorTable.OR, hToken(HiveParser.KW_OR, "or")); + registerFunction("=", SqlStdOperatorTable.EQUALS, hToken(HiveParser.EQUAL, "=")); + registerFunction("<", SqlStdOperatorTable.LESS_THAN, hToken(HiveParser.LESSTHAN, "<")); + registerFunction("<=", SqlStdOperatorTable.LESS_THAN_OR_EQUAL, + hToken(HiveParser.LESSTHANOREQUALTO, "<=")); + registerFunction(">", SqlStdOperatorTable.GREATER_THAN, hToken(HiveParser.GREATERTHAN, ">")); + registerFunction(">=", SqlStdOperatorTable.GREATER_THAN_OR_EQUAL, + hToken(HiveParser.GREATERTHANOREQUALTO, ">=")); + registerFunction("!", SqlStdOperatorTable.NOT, hToken(HiveParser.KW_NOT, "not")); + } + + private void registerFunction(String name, SqlOperator optiqFn, HiveToken hiveToken) { + reverseOperatorMap.put(optiqFn, name); + FunctionInfo hFn = FunctionRegistry.getFunctionInfo(name); + if (hFn != null) { + String hFnName = getName(hFn.getGenericUDF()); + hiveToOptiq.put(hFnName, optiqFn); + + if (hiveToken != null) { + optiqToHiveToken.put(optiqFn, hiveToken); + } + } + } + } + + private static HiveToken hToken(int type, String text) { + return new HiveToken(type, text); + } + + public static class OptiqUDAF extends SqlAggFunction { + final ImmutableList m_argTypes; + final RelDataType m_retType; + + public OptiqUDAF(String opName, SqlReturnTypeInference returnTypeInference, + SqlOperandTypeInference operandTypeInference, SqlOperandTypeChecker operandTypeChecker, + ImmutableList argTypes, RelDataType retType) { + super(opName, SqlKind.OTHER_FUNCTION, returnTypeInference, operandTypeInference, + operandTypeChecker, SqlFunctionCategory.USER_DEFINED_FUNCTION); + m_argTypes = argTypes; + m_retType = retType; + } + + @Override + public List getParameterTypes(final RelDataTypeFactory typeFactory) { + return m_argTypes; + } + + @Override + public RelDataType getReturnType(final RelDataTypeFactory typeFactory) { + return m_retType; + } + } + + private static class OptiqUDFInfo { + private String m_udfName; + private SqlReturnTypeInference m_returnTypeInference; + private SqlOperandTypeInference m_operandTypeInference; + private SqlOperandTypeChecker m_operandTypeChecker; + private ImmutableList m_argTypes; + private RelDataType m_retType; + } + + private static OptiqUDFInfo getUDFInfo(String hiveUdfName, + ImmutableList optiqArgTypes, RelDataType optiqRetType) { + OptiqUDFInfo udfInfo = new OptiqUDFInfo(); + udfInfo.m_udfName = hiveUdfName; + udfInfo.m_returnTypeInference = ReturnTypes.explicit(optiqRetType); + udfInfo.m_operandTypeInference = InferTypes.explicit(optiqArgTypes); + ImmutableList.Builder typeFamilyBuilder = new ImmutableList.Builder(); + for (RelDataType at : optiqArgTypes) { + typeFamilyBuilder.add(Util.first(at.getSqlTypeName().getFamily(), SqlTypeFamily.ANY)); + } + udfInfo.m_operandTypeChecker = 
OperandTypes.family(typeFamilyBuilder.build()); + + udfInfo.m_argTypes = ImmutableList. copyOf(optiqArgTypes); + udfInfo.m_retType = optiqRetType; + + return udfInfo; + } + + public static SqlOperator getOptiqFn(String hiveUdfName, + ImmutableList optiqArgTypes, RelDataType optiqRetType) throws OptiqSemanticException { + + if (hiveUdfName != null && hiveUdfName.trim().equals("<=>")) { + // We could create an Optiq IS_DISTINCT_FROM operator for this. But since our + // join reordering algorithm can't handle it anyway, there is no advantage to doing so. + // So, bail out for now. + throw new OptiqSemanticException("<=> is not yet supported for cbo."); + } + SqlOperator optiqOp = hiveToOptiq.get(hiveUdfName); + if (optiqOp == null) { + OptiqUDFInfo uInf = getUDFInfo(hiveUdfName, optiqArgTypes, optiqRetType); + optiqOp = new SqlFunction(uInf.m_udfName, SqlKind.OTHER_FUNCTION, uInf.m_returnTypeInference, + uInf.m_operandTypeInference, uInf.m_operandTypeChecker, + SqlFunctionCategory.USER_DEFINED_FUNCTION); + } + + return optiqOp; + } + + public static SqlAggFunction getOptiqAggFn(String hiveUdfName, + ImmutableList optiqArgTypes, RelDataType optiqRetType) { + SqlAggFunction optiqAggFn = (SqlAggFunction) hiveToOptiq.get(hiveUdfName); + if (optiqAggFn == null) { + OptiqUDFInfo uInf = getUDFInfo(hiveUdfName, optiqArgTypes, optiqRetType); + + optiqAggFn = new OptiqUDAF(uInf.m_udfName, uInf.m_returnTypeInference, + uInf.m_operandTypeInference, uInf.m_operandTypeChecker, uInf.m_argTypes, uInf.m_retType); + } + + return optiqAggFn; + } + + static class HiveToken { + int type; + String text; + String[] args; + + HiveToken(int type, String text, String... args) { + this.type = type; + this.text = text; + this.args = args; + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java new file mode 100644 index 0000000..2c30e9d --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/TypeConverter.java @@ -0,0 +1,327 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.ql.optimizer.optiq.translator; + +import java.util.ArrayList; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.exec.RowSchema; +import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter.HiveToken; +import org.apache.hadoop.hive.ql.parse.HiveParser; +import org.apache.hadoop.hive.ql.parse.RowResolver; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.reltype.RelDataType; +import org.eigenbase.reltype.RelDataTypeFactory; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.sql.type.SqlTypeName; + +import com.google.common.base.Function; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableMap.Builder; +import com.google.common.collect.Lists; + +public class TypeConverter { + private static final Map optiqToHiveTypeNameMap; + + // TODO: Handling of char[], varchar[], string... + static { + Builder b = ImmutableMap. 
builder(); + b.put(SqlTypeName.BOOLEAN.getName(), new HiveToken(HiveParser.TOK_BOOLEAN, "TOK_BOOLEAN")); + b.put(SqlTypeName.TINYINT.getName(), new HiveToken(HiveParser.TOK_TINYINT, "TOK_TINYINT")); + b.put(SqlTypeName.SMALLINT.getName(), new HiveToken(HiveParser.TOK_SMALLINT, "TOK_SMALLINT")); + b.put(SqlTypeName.INTEGER.getName(), new HiveToken(HiveParser.TOK_INT, "TOK_INT")); + b.put(SqlTypeName.BIGINT.getName(), new HiveToken(HiveParser.TOK_BIGINT, "TOK_BIGINT")); + b.put(SqlTypeName.FLOAT.getName(), new HiveToken(HiveParser.TOK_FLOAT, "TOK_FLOAT")); + b.put(SqlTypeName.DOUBLE.getName(), new HiveToken(HiveParser.TOK_DOUBLE, "TOK_DOUBLE")); + b.put(SqlTypeName.DATE.getName(), new HiveToken(HiveParser.TOK_DATE, "TOK_DATE")); + b.put(SqlTypeName.TIMESTAMP.getName(), new HiveToken(HiveParser.TOK_TIMESTAMP, "TOK_TIMESTAMP")); + b.put(SqlTypeName.BINARY.getName(), new HiveToken(HiveParser.TOK_BINARY, "TOK_BINARY")); + optiqToHiveTypeNameMap = b.build(); + }; + + /*********************** Convert Hive Types To Optiq Types ***********************/ + public static RelDataType getType(RelOptCluster cluster, + List cInfoLst) throws OptiqSemanticException { + RexBuilder rexBuilder = cluster.getRexBuilder(); + RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory(); + List fieldTypes = new LinkedList(); + List fieldNames = new LinkedList(); + + for (ColumnInfo ci : cInfoLst) { + fieldTypes.add(convert(ci.getType(), dtFactory)); + fieldNames.add(ci.getInternalName()); + } + return dtFactory.createStructType(fieldTypes, fieldNames); + } + + public static RelDataType getType(RelOptCluster cluster, RowResolver rr, + List neededCols) throws OptiqSemanticException { + RexBuilder rexBuilder = cluster.getRexBuilder(); + RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory(); + RowSchema rs = rr.getRowSchema(); + List fieldTypes = new LinkedList(); + List fieldNames = new LinkedList(); + + for (ColumnInfo ci : rs.getSignature()) { + if (neededCols == null || neededCols.contains(ci.getInternalName())) { + fieldTypes.add(convert(ci.getType(), dtFactory)); + fieldNames.add(ci.getInternalName()); + } + } + return dtFactory.createStructType(fieldTypes, fieldNames); + } + + public static RelDataType convert(TypeInfo type, RelDataTypeFactory dtFactory) + throws OptiqSemanticException{ + RelDataType convertedType = null; + + switch (type.getCategory()) { + case PRIMITIVE: + convertedType = convert((PrimitiveTypeInfo) type, dtFactory); + break; + case LIST: + convertedType = convert((ListTypeInfo) type, dtFactory); + break; + case MAP: + convertedType = convert((MapTypeInfo) type, dtFactory); + break; + case STRUCT: + convertedType = convert((StructTypeInfo) type, dtFactory); + break; + case UNION: + convertedType = convert((UnionTypeInfo) type, dtFactory); + break; + } + return convertedType; + } + + public static RelDataType convert(PrimitiveTypeInfo type, RelDataTypeFactory dtFactory) { + RelDataType convertedType = null; + + switch (type.getPrimitiveCategory()) { + case VOID: + convertedType = dtFactory.createSqlType(SqlTypeName.NULL); + break; + case BOOLEAN: + convertedType = dtFactory.createSqlType(SqlTypeName.BOOLEAN); + break; + case BYTE: + convertedType = dtFactory.createSqlType(SqlTypeName.TINYINT); + break; + case SHORT: + convertedType = dtFactory.createSqlType(SqlTypeName.SMALLINT); + break; + case INT: + convertedType = dtFactory.createSqlType(SqlTypeName.INTEGER); + break; + case LONG: + convertedType = dtFactory.createSqlType(SqlTypeName.BIGINT); + break; + case FLOAT: + convertedType = 
dtFactory.createSqlType(SqlTypeName.FLOAT); + break; + case DOUBLE: + convertedType = dtFactory.createSqlType(SqlTypeName.DOUBLE); + break; + case STRING: + convertedType = dtFactory.createSqlType(SqlTypeName.VARCHAR, Integer.MAX_VALUE); + break; + case DATE: + convertedType = dtFactory.createSqlType(SqlTypeName.DATE); + break; + case TIMESTAMP: + convertedType = dtFactory.createSqlType(SqlTypeName.TIMESTAMP); + break; + case BINARY: + convertedType = dtFactory.createSqlType(SqlTypeName.BINARY); + break; + case DECIMAL: + DecimalTypeInfo dtInf = (DecimalTypeInfo) type; + convertedType = dtFactory + .createSqlType(SqlTypeName.DECIMAL, dtInf.precision(), dtInf.scale()); + break; + case VARCHAR: + convertedType = dtFactory.createSqlType(SqlTypeName.VARCHAR, + ((BaseCharTypeInfo) type).getLength()); + break; + case CHAR: + convertedType = dtFactory.createSqlType(SqlTypeName.CHAR, + ((BaseCharTypeInfo) type).getLength()); + break; + case UNKNOWN: + convertedType = dtFactory.createSqlType(SqlTypeName.OTHER); + break; + } + + if (null == convertedType) { + throw new RuntimeException("Unsupported Type : " + type.getTypeName()); + } + + return convertedType; + } + + public static RelDataType convert(ListTypeInfo lstType, + RelDataTypeFactory dtFactory) throws OptiqSemanticException { + RelDataType elemType = convert(lstType.getListElementTypeInfo(), dtFactory); + return dtFactory.createArrayType(elemType, -1); + } + + public static RelDataType convert(MapTypeInfo mapType, RelDataTypeFactory dtFactory) + throws OptiqSemanticException { + RelDataType keyType = convert(mapType.getMapKeyTypeInfo(), dtFactory); + RelDataType valueType = convert(mapType.getMapValueTypeInfo(), dtFactory); + return dtFactory.createMapType(keyType, valueType); + } + + public static RelDataType convert(StructTypeInfo structType, + final RelDataTypeFactory dtFactory) throws OptiqSemanticException { + List fTypes = new ArrayList(structType.getAllStructFieldTypeInfos().size()); + for (TypeInfo ti : structType.getAllStructFieldTypeInfos()) { + fTypes.add(convert(ti,dtFactory)); + } + return dtFactory.createStructType(fTypes, structType.getAllStructFieldNames()); + } + + public static RelDataType convert(UnionTypeInfo unionType, RelDataTypeFactory dtFactory) + throws OptiqSemanticException{ + // Union type is not supported in Optiq. 
+ throw new OptiqSemanticException("Union type is not supported"); + } + + public static TypeInfo convert(RelDataType rType) { + if (rType.isStruct()) { + return convertStructType(rType); + } else if (rType.getComponentType() != null) { + return convertListType(rType); + } else if (rType.getKeyType() != null) { + return convertMapType(rType); + } else { + return convertPrimtiveType(rType); + } + } + + public static TypeInfo convertStructType(RelDataType rType) { + List fTypes = Lists.transform(rType.getFieldList(), + new Function() { + @Override + public TypeInfo apply(RelDataTypeField f) { + return convert(f.getType()); + } + }); + List fNames = Lists.transform(rType.getFieldList(), + new Function() { + @Override + public String apply(RelDataTypeField f) { + return f.getName(); + } + }); + return TypeInfoFactory.getStructTypeInfo(fNames, fTypes); + } + + public static TypeInfo convertMapType(RelDataType rType) { + return TypeInfoFactory.getMapTypeInfo(convert(rType.getKeyType()), + convert(rType.getValueType())); + } + + public static TypeInfo convertListType(RelDataType rType) { + return TypeInfoFactory.getListTypeInfo(convert(rType.getComponentType())); + } + + public static TypeInfo convertPrimtiveType(RelDataType rType) { + switch (rType.getSqlTypeName()) { + case BOOLEAN: + return TypeInfoFactory.booleanTypeInfo; + case TINYINT: + return TypeInfoFactory.byteTypeInfo; + case SMALLINT: + return TypeInfoFactory.shortTypeInfo; + case INTEGER: + return TypeInfoFactory.intTypeInfo; + case BIGINT: + return TypeInfoFactory.longTypeInfo; + case FLOAT: + return TypeInfoFactory.floatTypeInfo; + case DOUBLE: + return TypeInfoFactory.doubleTypeInfo; + case DATE: + return TypeInfoFactory.dateTypeInfo; + case TIMESTAMP: + return TypeInfoFactory.timestampTypeInfo; + case BINARY: + return TypeInfoFactory.binaryTypeInfo; + case DECIMAL: + return TypeInfoFactory.getDecimalTypeInfo(rType.getPrecision(), rType.getScale()); + case VARCHAR: + if (rType.getPrecision() == Integer.MAX_VALUE) + return TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME); + else + return TypeInfoFactory.getVarcharTypeInfo(rType.getPrecision()); + case CHAR: + return TypeInfoFactory.getCharTypeInfo(rType.getPrecision()); + case OTHER: + default: + return TypeInfoFactory.voidTypeInfo; + } + + } + + /*********************** Convert Optiq Types To Hive Types ***********************/ + public static HiveToken hiveToken(RelDataType optiqType) { + HiveToken ht = null; + + switch (optiqType.getSqlTypeName()) { + case CHAR: { + ht = new HiveToken(HiveParser.TOK_CHAR, "TOK_CHAR", String.valueOf(optiqType.getPrecision())); + } + break; + case VARCHAR: { + if (optiqType.getPrecision() == Integer.MAX_VALUE) + ht = new HiveToken(HiveParser.TOK_STRING, "TOK_STRING", String.valueOf(optiqType + .getPrecision())); + else + ht = new HiveToken(HiveParser.TOK_VARCHAR, "TOK_VARCHAR", String.valueOf(optiqType + .getPrecision())); + } + break; + case DECIMAL: { + ht = new HiveToken(HiveParser.TOK_DECIMAL, "TOK_DECIMAL", String.valueOf(optiqType + .getPrecision()), String.valueOf(optiqType.getScale())); + } + break; + default: + ht = optiqToHiveTypeNameMap.get(optiqType.getSqlTypeName().getName()); + } + + return ht; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductCheck.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductCheck.java index c22d9ac..7ea0166 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductCheck.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CrossProductCheck.java @@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator; import org.apache.hadoop.hive.ql.exec.ConditionalTask; +import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator; import org.apache.hadoop.hive.ql.exec.JoinOperator; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.Operator; @@ -56,6 +57,7 @@ import org.apache.hadoop.hive.ql.plan.MapJoinDesc; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.MergeJoinWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; import org.apache.hadoop.hive.ql.plan.ReduceWork; @@ -152,6 +154,11 @@ private void checkMapJoins(MapRedTask mrTsk) throws SemanticException { private void checkMapJoins(TezWork tzWrk) throws SemanticException { for(BaseWork wrk : tzWrk.getAllWork() ) { + + if ( wrk instanceof MergeJoinWork ) { + wrk = ((MergeJoinWork)wrk).getMainWork(); + } + List warnings = new MapJoinCheck(wrk.getName()).analyze(wrk); if ( !warnings.isEmpty() ) { for(String w : warnings) { @@ -163,12 +170,17 @@ private void checkMapJoins(TezWork tzWrk) throws SemanticException { private void checkTezReducer(TezWork tzWrk) throws SemanticException { for(BaseWork wrk : tzWrk.getAllWork() ) { - if ( !(wrk instanceof ReduceWork) ) { + + if ( wrk instanceof MergeJoinWork ) { + wrk = ((MergeJoinWork)wrk).getMainWork(); + } + + if ( !(wrk instanceof ReduceWork ) ) { continue; } ReduceWork rWork = (ReduceWork) wrk; Operator reducer = ((ReduceWork)wrk).getReducer(); - if ( reducer instanceof JoinOperator ) { + if ( reducer instanceof JoinOperator || reducer instanceof CommonMergeJoinOperator ) { Map rsInfo = new HashMap(); for(Map.Entry e : rWork.getTagToInput().entrySet()) { @@ -185,7 +197,7 @@ private void checkMRReducer(String taskName, MapredWork mrWrk) throws SemanticEx return; } Operator reducer = rWrk.getReducer(); - if ( reducer instanceof JoinOperator ) { + if ( reducer instanceof JoinOperator || reducer instanceof CommonMergeJoinOperator ) { BaseWork prntWork = mrWrk.getMapWork(); checkForCrossProduct(taskName, reducer, new ExtractReduceSinkInfo(null).analyze(prntWork)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java index 7fb2c4d..957c327 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/NullScanTaskDispatcher.java @@ -102,13 +102,19 @@ private PartitionDesc changePartitionToMetadataOnly(PartitionDesc desc) { } private void processAlias(MapWork work, String alias) { + List paths = getPathsForAlias(work, alias); + if (paths.isEmpty()) { + // a partitioned table whose predicate selects no partitions; + // there are no paths to replace with fakePath + return; + } work.setUseOneNullRowInputFormat(true); // Change the alias partition desc PartitionDesc aliasPartn = work.getAliasToPartnInfo().get(alias); changePartitionToMetadataOnly(aliasPartn); - List paths = getPathsForAlias(work, alias); + for (String path : paths) { PartitionDesc partDesc = work.getPathToPartitionInfo().get(path); PartitionDesc newPartition = changePartitionToMetadataOnly(partDesc); diff --git
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index b5f2d09..050a9a5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -422,10 +422,12 @@ private boolean getOnlyStructObjectInspectors(ReduceWork reduceWork) throws Sema // Check value ObjectInspector. ObjectInspector valueObjectInspector = reduceWork.getValueObjectInspector(); - if (valueObjectInspector == null || !(valueObjectInspector instanceof StructObjectInspector)) { + if (valueObjectInspector == null || + !(valueObjectInspector instanceof StructObjectInspector)) { return false; } - StructObjectInspector valueStructObjectInspector = (StructObjectInspector)valueObjectInspector; + StructObjectInspector valueStructObjectInspector = + (StructObjectInspector)valueObjectInspector; valueColCount = valueStructObjectInspector.getAllStructFieldRefs().size(); } catch (Exception e) { throw new SemanticException(e); @@ -471,18 +473,20 @@ private void vectorizeReduceWork(ReduceWork reduceWork) throws SemanticException LOG.info("Vectorizing ReduceWork..."); reduceWork.setVectorMode(true); - // For some reason, the DefaultGraphWalker does not descend down from the reducer Operator as expected. - // We need to descend down, otherwise it breaks our algorithm that determines VectorizationContext... - // Do we use PreOrderWalker instead of DefaultGraphWalker. + // For some reason, the DefaultGraphWalker does not descend down from the reducer Operator as + // expected. We need to descend down, otherwise it breaks our algorithm that determines + // VectorizationContext... Do we use PreOrderWalker instead of DefaultGraphWalker. 
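The comment above is the heart of the walker choice: an operator's VectorizationContext is found on its nearest vectorized ancestor, so parents must be visited before their children, which PreOrderWalker guarantees. A toy pre-order traversal showing the property, with a hypothetical Node type standing in for Hive's operator tree:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

class PreOrderWalkSketch {
  static class Node {
    final String name;
    final List<Node> children;
    Node(String name, Node... children) {
      this.name = name;
      this.children = List.of(children);
    }
  }

  // Visit the parent before the children: a child can always look up the
  // context its parent just published, the property the vectorizer needs.
  static void walk(Node n, String inheritedCtx, Map<String, String> published) {
    String ctx = inheritedCtx + "/" + n.name;  // pretend each operator refines the context
    published.put(n.name, ctx);
    for (Node c : n.children) {
      walk(c, ctx, published);
    }
  }

  public static void main(String[] args) {
    Node root = new Node("REDUCER", new Node("SEL", new Node("FS")));
    Map<String, String> published = new HashMap<>();
    walk(root, "reduce-shuffle", published);
    System.out.println(published.get("FS")); // reduce-shuffle/REDUCER/SEL/FS
  }
}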
Map opRules = new LinkedHashMap(); - ReduceWorkVectorizationNodeProcessor vnp = new ReduceWorkVectorizationNodeProcessor(reduceWork, keyColCount, valueColCount); + ReduceWorkVectorizationNodeProcessor vnp = + new ReduceWorkVectorizationNodeProcessor(reduceWork, keyColCount, valueColCount); addReduceWorkRules(opRules, vnp); Dispatcher disp = new DefaultRuleDispatcher(vnp, opRules, null); GraphWalker ogw = new PreOrderWalker(disp); // iterator the reduce operator tree ArrayList topNodes = new ArrayList(); topNodes.add(reduceWork.getReducer()); - LOG.info("vectorizeReduceWork reducer Operator: " + reduceWork.getReducer().getName() + "..."); + LOG.info("vectorizeReduceWork reducer Operator: " + + reduceWork.getReducer().getName() + "..."); HashMap nodeOutput = new HashMap(); ogw.startWalking(topNodes, nodeOutput); @@ -561,7 +565,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, protected final Map scratchColumnContext = new HashMap(); - protected final Map, VectorizationContext> vContextsByTSOp = + protected final Map, VectorizationContext> vContextsByOp = new HashMap, VectorizationContext>(); protected final Set> opsDone = @@ -589,28 +593,30 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return scratchColumnMap; } - public VectorizationContext walkStackToFindVectorizationContext(Stack stack, Operator op) - throws SemanticException { + public VectorizationContext walkStackToFindVectorizationContext(Stack stack, + Operator op) throws SemanticException { VectorizationContext vContext = null; if (stack.size() <= 1) { - throw new SemanticException(String.format("Expected operator stack for operator %s to have at least 2 operators", op.getName())); + throw new SemanticException( + String.format("Expected operator stack for operator %s to have at least 2 operators", + op.getName())); } // Walk down the stack of operators until we found one willing to give us a context. 
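The walk this comment introduces (the loop follows below) now signals a miss by returning null instead of throwing, leaving the policy to the caller: the map-side processor still raises a SemanticException, while the reduce-side processor falls back to the reduce-shuffle context. A generic sketch of the walk itself, with hypothetical Op and Ctx type parameters rather than Hive's operator types:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

class ContextStackWalkSketch {
  // Starting just below the operator itself, scan toward the root for the
  // nearest ancestor that has published a context; null means "not found",
  // and the caller decides whether that is fatal or triggers a fallback.
  static <Op, Ctx> Ctx findContext(List<Op> stack, Map<Op, Ctx> contexts) {
    for (int i = stack.size() - 2; i >= 0; --i) {
      Ctx c = contexts.get(stack.get(i));
      if (c != null) {
        return c;
      }
    }
    return null;
  }

  public static void main(String[] args) {
    Map<String, String> published = new HashMap<>();
    published.put("TS", "ts-context");
    // Stack bottom-to-top: TS -> SEL -> GBY; GBY inherits from TS via SEL.
    System.out.println(findContext(List.of("TS", "SEL", "GBY"), published));
  }
}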
// At the bottom will be the root operator, guaranteed to have a context int i= stack.size()-2; while (vContext == null) { if (i < 0) { - throw new SemanticException(String.format("Did not find vectorization context for operator %s in operator stack", op.getName())); + return null; } Operator opParent = (Operator) stack.get(i); - vContext = vContextsByTSOp.get(opParent); + vContext = vContextsByOp.get(opParent); --i; } return vContext; } - public Operator doVectorize(Operator op, VectorizationContext vContext) - throws SemanticException { + public Operator doVectorize(Operator op, + VectorizationContext vContext) throws SemanticException { Operator vectorOp = op; try { if (!opsDone.contains(op)) { @@ -622,7 +628,7 @@ public VectorizationContext walkStackToFindVectorizationContext(Stack stac if (vectorOp instanceof VectorizationContextRegion) { VectorizationContextRegion vcRegion = (VectorizationContextRegion) vectorOp; VectorizationContext vOutContext = vcRegion.getOuputVectorizationContext(); - vContextsByTSOp.put(op, vOutContext); + vContextsByOp.put(op, vOutContext); scratchColumnContext.put(vOutContext.getFileKey(), vOutContext); } } @@ -669,13 +675,24 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // vContext.setFileKey(onefile); scratchColumnContext.put(onefile, vContext); + if (LOG.isDebugEnabled()) { + LOG.debug("Vectorized MapWork operator " + op.getName() + + " with vectorization context key=" + vContext.getFileKey() + + ", vectorTypes: " + vContext.getOutputColumnTypeMap().toString() + + ", columnMap: " + vContext.getColumnMap().toString()); + } break; } } } - vContextsByTSOp.put(op, vContext); + vContextsByOp.put(op, vContext); } else { vContext = walkStackToFindVectorizationContext(stack, op); + if (vContext == null) { + throw new SemanticException( + String.format("Did not find vectorization context for operator %s in operator stack", + op.getName())); + } } assert vContext != null; @@ -690,7 +707,22 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return null; } - doVectorize(op, vContext); + Operator vectorOp = doVectorize(op, vContext); + + if (LOG.isDebugEnabled()) { + LOG.debug("Vectorized MapWork operator " + vectorOp.getName() + + " with vectorization context key=" + vContext.getFileKey() + + ", vectorTypes: " + vContext.getOutputColumnTypeMap().toString() + + ", columnMap: " + vContext.getColumnMap().toString()); + if (vectorOp instanceof VectorizationContextRegion) { + VectorizationContextRegion vcRegion = (VectorizationContextRegion) vectorOp; + VectorizationContext vOutContext = vcRegion.getOuputVectorizationContext(); + LOG.debug("Vectorized MapWork operator " + vectorOp.getName() + + " added new vectorization context key=" + vOutContext.getFileKey() + + ", vectorTypes: " + vOutContext.getOutputColumnTypeMap().toString() + + ", columnMap: " + vOutContext.getColumnMap().toString()); + } + } return null; } @@ -702,6 +734,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, private int keyColCount; private int valueColCount; private Map reduceColumnNameMap; + + private VectorizationContext reduceShuffleVectorizationContext; private Operator rootVectorOp; @@ -709,12 +743,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return rootVectorOp; } - public ReduceWorkVectorizationNodeProcessor(ReduceWork rWork, int keyColCount, int valueColCount) { + public ReduceWorkVectorizationNodeProcessor(ReduceWork rWork, int keyColCount, + int valueColCount) { this.rWork = rWork; 
reduceColumnNameMap = rWork.getReduceColumnNameMap(); this.keyColCount = keyColCount; this.valueColCount = valueColCount; rootVectorOp = null; + reduceShuffleVectorizationContext = null; } @Override @@ -722,7 +758,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { Operator op = (Operator) nd; - LOG.info("ReduceWorkVectorizationNodeProcessor processing Operator: " + op.getName() + "..."); + LOG.info("ReduceWorkVectorizationNodeProcessor processing Operator: " + + op.getName() + "..."); VectorizationContext vContext = null; @@ -730,10 +767,24 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, if (op.getParentOperators().size() == 0) { vContext = getReduceVectorizationContext(reduceColumnNameMap); - vContextsByTSOp.put(op, vContext); + vContext.setFileKey("_REDUCE_SHUFFLE_"); + scratchColumnContext.put("_REDUCE_SHUFFLE_", vContext); + reduceShuffleVectorizationContext = vContext; saveRootVectorOp = true; + + if (LOG.isDebugEnabled()) { + LOG.debug("Vectorized ReduceWork reduce shuffle vectorization context key=" + + vContext.getFileKey() + + ", vectorTypes: " + vContext.getOutputColumnTypeMap().toString() + + ", columnMap: " + vContext.getColumnMap().toString()); + } } else { vContext = walkStackToFindVectorizationContext(stack, op); + if (vContext == null) { + // If we didn't find a context among the operators, assume the top -- reduce shuffle's + // vectorization context. + vContext = reduceShuffleVectorizationContext; + } } assert vContext != null; @@ -749,6 +800,21 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } Operator vectorOp = doVectorize(op, vContext); + + if (LOG.isDebugEnabled()) { + LOG.debug("Vectorized ReduceWork operator " + vectorOp.getName() + + " with vectorization context key=" + vContext.getFileKey() + + ", vectorTypes: " + vContext.getOutputColumnTypeMap().toString() + + ", columnMap: " + vContext.getColumnMap().toString()); + if (vectorOp instanceof VectorizationContextRegion) { + VectorizationContextRegion vcRegion = (VectorizationContextRegion) vectorOp; + VectorizationContext vOutContext = vcRegion.getOuputVectorizationContext(); + LOG.debug("Vectorized ReduceWork operator " + vectorOp.getName() + + " added new vectorization context key=" + vOutContext.getFileKey() + + ", vectorTypes: " + vOutContext.getOutputColumnTypeMap().toString() + + ", columnMap: " + vOutContext.getColumnMap().toString()); + } + } if (vectorOp instanceof VectorGroupByOperator) { VectorGroupByOperator groupBy = (VectorGroupByOperator) vectorOp; VectorGroupByDesc vectorDesc = groupBy.getConf().getVectorDesc(); @@ -827,6 +893,7 @@ boolean validateMapWorkOperator(Operator op, boolean isT break; case FILESINK: case LIMIT: + case EVENT: ret = true; break; default: @@ -866,6 +933,7 @@ boolean validateReduceWorkOperator(Operator op) { ret = validateFileSinkOperator((FileSinkOperator) op); break; case LIMIT: + case EVENT: ret = true; break; default: @@ -1005,11 +1073,6 @@ private boolean validateExtractOperator(ExtractOperator op) { } private boolean validateFileSinkOperator(FileSinkOperator op) { - // HIVE-7557: For now, turn off dynamic partitioning to give more time to - // figure out how to make VectorFileSink work correctly with it... 
- if (op.getConf().getDynPartCtx() != null) { - return false; - } return true; } @@ -1017,7 +1080,8 @@ private boolean validateExprNodeDesc(List descs) { return validateExprNodeDesc(descs, VectorExpressionDescriptor.Mode.PROJECTION); } - private boolean validateExprNodeDesc(List descs, VectorExpressionDescriptor.Mode mode) { + private boolean validateExprNodeDesc(List descs, + VectorExpressionDescriptor.Mode mode) { for (ExprNodeDesc d : descs) { boolean ret = validateExprNodeDesc(d, mode); if (!ret) { @@ -1109,8 +1173,8 @@ private boolean validateAggregationDesc(AggregationDesc aggDesc, boolean isReduc if (!supportedAggregationUdfs.contains(aggDesc.getGenericUDAFName().toLowerCase())) { return false; } - if (aggDesc.getParameters() != null) { - return validateExprNodeDesc(aggDesc.getParameters()); + if (aggDesc.getParameters() != null && !validateExprNodeDesc(aggDesc.getParameters())) { + return false; } // See if we can vectorize the aggregation. try { @@ -1175,11 +1239,13 @@ private VectorizationContext getVectorizationContext(Operator op, return new VectorizationContext(cmap, columnCount); } - private VectorizationContext getReduceVectorizationContext(Map reduceColumnNameMap) { + private VectorizationContext getReduceVectorizationContext( + Map reduceColumnNameMap) { return new VectorizationContext(reduceColumnNameMap, reduceColumnNameMap.size()); } - private void fixupParentChildOperators(Operator op, Operator vectorOp) { + private void fixupParentChildOperators(Operator op, + Operator vectorOp) { if (op.getParentOperators() != null) { vectorOp.setParentOperators(op.getParentOperators()); for (Operator p : op.getParentOperators()) { @@ -1207,6 +1273,7 @@ private void fixupParentChildOperators(Operator op, Oper case REDUCESINK: case LIMIT: case EXTRACT: + case EVENT: vectorOp = OperatorFactory.getVectorOperator(op.getConf(), vContext); break; default: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java index c6669af..e43d39f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; /** * The transformation step that does partition pruning. @@ -155,27 +156,85 @@ public static PrunedPartitionList prune(TableScanOperator ts, ParseContext parse * pruner condition. * @throws HiveException */ - private static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr, + public static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr, HiveConf conf, String alias, Map prunedPartitionsMap) throws SemanticException { + LOG.trace("Started pruning partition"); LOG.trace("dbname = " + tab.getDbName()); LOG.trace("tabname = " + tab.getTableName()); - LOG.trace("prune Expression = " + prunerExpr); + LOG.trace("prune Expression = " + (prunerExpr == null ? "" : prunerExpr)); String key = tab.getDbName() + "." + tab.getTableName() + ";"; - if (prunerExpr != null) { - key = key + prunerExpr.getExprString(); + if (!tab.isPartitioned()) { + // If the table is not partitioned, return everything.
+ return getAllPartsFromCacheOrServer(tab, key, false, prunedPartitionsMap); + } + + if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE)) + && !hasColumnExpr(prunerExpr)) { + // If the "strict" mode is on, we have to provide partition pruner for each table. + throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE + .getMsg("for Alias \"" + alias + "\" Table \"" + tab.getTableName() + "\"")); + } + + if (prunerExpr == null) { + // In non-strict mode and there is no predicates at all - get everything. + return getAllPartsFromCacheOrServer(tab, key, false, prunedPartitionsMap); + } + + Set partColsUsedInFilter = new LinkedHashSet(); + // Replace virtual columns with nulls. See javadoc for details. + prunerExpr = removeNonPartCols(prunerExpr, extractPartColNames(tab), partColsUsedInFilter); + // Remove all parts that are not partition columns. See javadoc for details. + ExprNodeGenericFuncDesc compactExpr = (ExprNodeGenericFuncDesc)compactExpr(prunerExpr.clone()); + String oldFilter = prunerExpr.getExprString(); + if (compactExpr == null) { + // Non-strict mode, and all the predicates are on non-partition columns - get everything. + LOG.debug("Filter " + oldFilter + " was null after compacting"); + return getAllPartsFromCacheOrServer(tab, key, true, prunedPartitionsMap); } - PrunedPartitionList ret = prunedPartitionsMap.get(key); - if (ret != null) { - return ret; + LOG.debug("Filter w/ compacting: " + compactExpr.getExprString() + + "; filter w/o compacting: " + oldFilter); + + key = key + compactExpr.getExprString(); + PrunedPartitionList ppList = prunedPartitionsMap.get(key); + if (ppList != null) { + return ppList; + } + + ppList = getPartitionsFromServer(tab, compactExpr, conf, alias, partColsUsedInFilter, oldFilter.equals(compactExpr.getExprString())); + prunedPartitionsMap.put(key, ppList); + return ppList; + } + + private static PrunedPartitionList getAllPartsFromCacheOrServer(Table tab, String key, boolean unknownPartitions, + Map partsCache) throws SemanticException { + PrunedPartitionList ppList = partsCache.get(key); + if (ppList != null) { + return ppList; + } + Set parts; + try { + parts = getAllPartitions(tab); + } catch (HiveException e) { + throw new SemanticException(e); } + ppList = new PrunedPartitionList(tab, parts, null, unknownPartitions); + partsCache.put(key, ppList); + return ppList; + } - ret = getPartitionsFromServer(tab, prunerExpr, conf, alias); - prunedPartitionsMap.put(key, ret); - return ret; + private static ExprNodeDesc removeTruePredciates(ExprNodeDesc e) { + if (e instanceof ExprNodeConstantDesc) { + ExprNodeConstantDesc eC = (ExprNodeConstantDesc) e; + if (e.getTypeInfo() == TypeInfoFactory.booleanTypeInfo + && eC.getValue() == Boolean.TRUE) { + return null; + } + } + return e; } /** @@ -187,7 +246,8 @@ private static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr, */ static private ExprNodeDesc compactExpr(ExprNodeDesc expr) { if (expr instanceof ExprNodeConstantDesc) { - if (((ExprNodeConstantDesc)expr).getValue() == null) { + expr = removeTruePredciates(expr); + if (expr == null || ((ExprNodeConstantDesc)expr).getValue() == null) { return null; } else { throw new IllegalStateException("Unexpected non-null ExprNodeConstantDesc: " @@ -198,10 +258,11 @@ static private ExprNodeDesc compactExpr(ExprNodeDesc expr) { boolean isAnd = udf instanceof GenericUDFOPAnd; if (isAnd || udf instanceof GenericUDFOPOr) { List children = expr.getChildren(); - ExprNodeDesc left = children.get(0); - children.set(0, 
compactExpr(left)); - ExprNodeDesc right = children.get(1); - children.set(1, compactExpr(right)); + ExprNodeDesc left = removeTruePredciates(children.get(0)); + children.set(0, left == null ? null : compactExpr(left)); + ExprNodeDesc right = removeTruePredciates(children.get(1)); + children.set(1, right == null ? null : compactExpr(right)); + // Note that one does not simply compact (not-null or null) to not-null. // Only if we have an "and" is it valid to send one side to metastore. if (children.get(0) == null && children.get(1) == null) { @@ -267,40 +328,8 @@ static private boolean hasUserFunctions(ExprNodeDesc expr) { } private static PrunedPartitionList getPartitionsFromServer(Table tab, - ExprNodeDesc prunerExpr, HiveConf conf, String alias) throws SemanticException { + final ExprNodeGenericFuncDesc compactExpr, HiveConf conf, String alias, Set partColsUsedInFilter, boolean isPruningByExactFilter) throws SemanticException { try { - if (!tab.isPartitioned()) { - // If the table is not partitioned, return everything. - return new PrunedPartitionList(tab, getAllPartitions(tab), null, false); - } - LOG.debug("tabname = " + tab.getTableName() + " is partitioned"); - - if ("strict".equalsIgnoreCase(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE)) - && !hasColumnExpr(prunerExpr)) { - // If the "strict" mode is on, we have to provide partition pruner for each table. - throw new SemanticException(ErrorMsg.NO_PARTITION_PREDICATE - .getMsg("for Alias \"" + alias + "\" Table \"" + tab.getTableName() + "\"")); - } - - if (prunerExpr == null) { - // Non-strict mode, and there is no predicates at all - get everything. - return new PrunedPartitionList(tab, getAllPartitions(tab), null, false); - } - - Set referred = new LinkedHashSet(); - // Replace virtual columns with nulls. See javadoc for details. - prunerExpr = removeNonPartCols(prunerExpr, extractPartColNames(tab), referred); - // Remove all parts that are not partition columns. See javadoc for details. - ExprNodeGenericFuncDesc compactExpr = (ExprNodeGenericFuncDesc)compactExpr(prunerExpr.clone()); - String oldFilter = prunerExpr.getExprString(); - if (compactExpr == null) { - // Non-strict mode, and all the predicates are on non-partition columns - get everything. - LOG.debug("Filter " + oldFilter + " was null after compacting"); - return new PrunedPartitionList(tab, getAllPartitions(tab), null, true); - } - - LOG.debug("Filter w/ compacting: " + compactExpr.getExprString() - + "; filter w/o compacting: " + oldFilter); // Finally, check the filter for non-built-in UDFs. If these are present, we cannot // do filtering on the server, and have to fall back to client path. @@ -330,9 +359,8 @@ private static PrunedPartitionList getPartitionsFromServer(Table tab, // The partitions are "unknown" if the call says so due to the expression // evaluator returning null for a partition, or if we sent a partial expression to // metastore and so some partitions may have no data based on other filters. 
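With removeTruePredciates in place, compactExpr can drop literal TRUE predicates before the filter is pushed to the metastore, so (dt = '2014-01-01') AND true compacts to just the partition condition, while an OR with a non-pushable side must be discarded entirely. A simplified model of those pruning rules on a toy expression tree (hypothetical record types, not Hive's ExprNodeDesc):

sealed interface Expr permits Leaf, Bool, Node {}
record Leaf(String pred) implements Expr {}          // a pushable partition predicate
record Bool(boolean value) implements Expr {}        // a literal boolean constant
record Node(boolean isAnd, Expr l, Expr r) implements Expr {}

class CompactSketch {
  // TRUE literals are vacuous and vanish; an AND may keep its surviving side,
  // but a partial OR cannot be sent to the metastore and becomes null.
  static Expr compact(Expr e) {
    if (e instanceof Bool b) {
      if (b.value()) {
        return null;
      }
      throw new IllegalStateException("Unexpected constant: " + b);
    }
    if (e instanceof Node n) {
      Expr l = compact(n.l());
      Expr r = compact(n.r());
      if (l == null && r == null) return null;
      if (l == null) return n.isAnd() ? r : null;
      if (r == null) return n.isAnd() ? l : null;
      return new Node(n.isAnd(), l, r);
    }
    return e;  // a Leaf survives as-is
  }

  public static void main(String[] args) {
    // (dt = '2014-01-01') AND true  ->  Leaf[pred=dt = '2014-01-01']
    System.out.println(compact(new Node(true, new Leaf("dt = '2014-01-01'"), new Bool(true))));
  }
}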
- boolean isPruningByExactFilter = oldFilter.equals(compactExpr.getExprString()); return new PrunedPartitionList(tab, new LinkedHashSet(partitions), - new ArrayList(referred), + new ArrayList(partColsUsedInFilter), hasUnknownPartitions || !isPruningByExactFilter); } catch (SemanticException e) { throw e; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index 13d1f88..ab7eee8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -18,8 +18,14 @@ package org.apache.hadoop.hive.ql.optimizer.stats.annotation; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import java.lang.reflect.Field; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; @@ -31,10 +37,12 @@ import org.apache.hadoop.hive.ql.exec.GroupByOperator; import org.apache.hadoop.hive.ql.exec.LimitOperator; import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.OperatorUtils; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.SelectOperator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.exec.tez.DagUtils; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; @@ -48,10 +56,12 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.GroupByDesc; import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.Statistics; import org.apache.hadoop.hive.ql.stats.StatsUtils; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual; @@ -66,17 +76,15 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr; import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Stack; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; public class StatsRulesProcFactory { private static final Log LOG = LogFactory.getLog(StatsRulesProcFactory.class.getName()); + private static final boolean isDebugEnabled = LOG.isDebugEnabled(); /** * Collect basic statistics like number of rows, data size and column level statistics from the @@ -103,9 +111,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Statistics stats = StatsUtils.collectStatistics(aspCtx.getConf(), partList, table, tsop); 
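The isPruningByExactFilter flag above decides whether the pruned result can be trusted as exact: if the expression actually sent to the metastore is only a compacted relaxation of the original predicate, the returned set may be a superset and must be flagged as containing unknown partitions. A small sketch of that decision, under the simplifying assumption that filters are compared as strings, just as the code does with getExprString():

class UnknownPartitionsSketch {
  // The pruned list is exact only when the filter sent to the metastore is
  // textually identical to the original predicate; a compacted (relaxed)
  // filter may match a superset, so the result must be flagged as unknown.
  static boolean hasUnknownPartitions(String originalFilter, String sentFilter,
      boolean evaluatorSawUnknown) {
    boolean isPruningByExactFilter = originalFilter.equals(sentFilter);
    return evaluatorSawUnknown || !isPruningByExactFilter;
  }

  public static void main(String[] args) {
    System.out.println(hasUnknownPartitions(
        "((dt = '2014-01-01') and (x > 1))", "(dt = '2014-01-01')", false)); // true
    System.out.println(hasUnknownPartitions(
        "(dt = '2014-01-01')", "(dt = '2014-01-01')", false));               // false
  }
}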
tsop.setStatistics(stats.clone()); - if (LOG.isDebugEnabled()) { - LOG.debug("[0] STATS-" + tsop.toString() + " (" + table.getTableName() - + "): " + stats.extendedToString()); + if (isDebugEnabled) { + LOG.debug("[0] STATS-" + tsop.toString() + " (" + table.getTableName() + "): " + + stats.extendedToString()); } } catch (CloneNotSupportedException e) { throw new SemanticException(ErrorMsg.STATISTICS_CLONING_FAILED.getMsg()); @@ -167,14 +175,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, stats.setDataSize(setMaxIfInvalid(dataSize)); sop.setStatistics(stats); - if (LOG.isDebugEnabled()) { + if (isDebugEnabled) { LOG.debug("[0] STATS-" + sop.toString() + ": " + stats.extendedToString()); } } else { if (parentStats != null) { sop.setStatistics(parentStats.clone()); - if (LOG.isDebugEnabled()) { + if (isDebugEnabled) { LOG.debug("[1] STATS-" + sop.toString() + ": " + parentStats.extendedToString()); } } @@ -264,7 +272,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, updateStats(st, newNumRows, true, fop); } - if (LOG.isDebugEnabled()) { + if (isDebugEnabled) { LOG.debug("[0] STATS-" + fop.toString() + ": " + st.extendedToString()); } } else { @@ -274,7 +282,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, updateStats(st, newNumRows, false, fop); } - if (LOG.isDebugEnabled()) { + if (isDebugEnabled) { LOG.debug("[1] STATS-" + fop.toString() + ": " + st.extendedToString()); } } @@ -576,52 +584,103 @@ private long evaluateChildExpr(Statistics stats, ExprNodeDesc child, @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { + GroupByOperator gop = (GroupByOperator) nd; Operator parent = gop.getParentOperators().get(0); Statistics parentStats = parent.getStatistics(); + + // parent stats are not populated yet + if (parentStats == null) { + return null; + } + AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx; HiveConf conf = aspCtx.getConf(); - int mapSideParallelism = - HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_STATS_MAP_SIDE_PARALLELISM); + long maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE); List aggDesc = gop.getConf().getAggregators(); Map colExprMap = gop.getColumnExprMap(); RowSchema rs = gop.getSchema(); Statistics stats = null; + List colStats = StatsUtils.getColStatisticsFromExprMap(conf, parentStats, + colExprMap, rs); + long cardinality; + long parallelism = 1L; boolean mapSide = false; - int multiplier = mapSideParallelism; - long newNumRows; - long newDataSize; + boolean mapSideHashAgg = false; + long inputSize = 1L; + boolean containsGroupingSet = gop.getConf().isGroupingSetsPresent(); + long sizeOfGroupingSet = + containsGroupingSet ? gop.getConf().getListGroupingSets().size() : 1L; + + // There are different cases for Group By depending on map/reduce side, hash aggregation, + // grouping sets and column stats. If we don't have column stats, we just assume hash + // aggregation is disabled. 
Following are the possible cases and rule for cardinality + // estimation + + // MAP SIDE: + // Case 1: NO column stats, NO hash aggregation, NO grouping sets — numRows + // Case 2: NO column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet + // Case 3: column stats, hash aggregation, NO grouping sets — Min(numRows / 2, ndvProduct * parallelism) + // Case 4: column stats, hash aggregation, grouping sets — Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) + // Case 5: column stats, NO hash aggregation, NO grouping sets — numRows + // Case 6: column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet + + // REDUCE SIDE: + // Case 7: NO column stats — numRows / 2 + // Case 8: column stats, grouping sets — Min(numRows, ndvProduct * sizeOfGroupingSet) + // Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) - // map side if (gop.getChildOperators().get(0) instanceof ReduceSinkOperator || gop.getChildOperators().get(0) instanceof AppMasterEventOperator) { - mapSide = true; - - // map-side grouping set present. if grouping set is present then - // multiply the number of rows by number of elements in grouping set - if (gop.getConf().isGroupingSetsPresent()) { - multiplier *= gop.getConf().getListGroupingSets().size(); + mapSide = true; + + // consider approximate map side parallelism to be table data size + // divided by max split size + TableScanOperator top = OperatorUtils.findSingleOperatorUpstream(gop, + TableScanOperator.class); + // if top is null then there are multiple parents (RS as well), hence + // lets use parent statistics to get data size. Also maxSplitSize should + // be updated to bytes per reducer (1GB default) + if (top == null) { + inputSize = parentStats.getDataSize(); + maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.BYTESPERREDUCER); + } else { + inputSize = top.getConf().getStatistics().getDataSize(); } + parallelism = (int) Math.ceil((double) inputSize / maxSplitSize); + } + + if (isDebugEnabled) { + LOG.debug("STATS-" + gop.toString() + ": inputSize: " + inputSize + " maxSplitSize: " + + maxSplitSize + " parallelism: " + parallelism + " containsGroupingSet: " + + containsGroupingSet + " sizeOfGroupingSet: " + sizeOfGroupingSet); } try { + // satisfying precondition means column statistics is available if (satisfyPrecondition(parentStats)) { - stats = parentStats.clone(); - List colStats = - StatsUtils.getColStatisticsFromExprMap(conf, parentStats, colExprMap, rs); + // check if map side aggregation is possible or not based on column stats + mapSideHashAgg = checkMapSideAggregation(gop, colStats, conf); + + if (isDebugEnabled) { + LOG.debug("STATS-" + gop.toString() + " mapSideHashAgg: " + mapSideHashAgg); + } + + stats = parentStats.clone(); stats.setColumnStats(colStats); - long dvProd = 1; + long ndvProduct = 1; + final long parentNumRows = stats.getNumRows(); // compute product of distinct values of grouping columns for (ColStatistics cs : colStats) { if (cs != null) { - long dv = cs.getCountDistint(); + long ndv = cs.getCountDistint(); if (cs.getNumNulls() > 0) { - dv += 1; + ndv += 1; } - dvProd *= dv; + ndvProduct *= ndv; } else { if (parentStats.getColumnStatsState().equals(Statistics.State.COMPLETE)) { // the column must be an aggregate column inserted by GBY. We @@ -632,65 +691,130 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // partial column statistics on grouping attributes case. 
// if column statistics on grouping attribute is missing, then // assume worst case. - // GBY rule will emit half the number of rows if dvProd is 0 - dvProd = 0; + // GBY rule will emit half the number of rows if ndvProduct is 0 + ndvProduct = 0; } break; } } - // map side + // if ndvProduct is 0 then column stats state must be partial and we are missing + // column stats for a group by column + if (ndvProduct == 0) { + ndvProduct = parentNumRows / 2; + + if (isDebugEnabled) { + LOG.debug("STATS-" + gop.toString() + ": ndvProduct became 0 as some column does not" + + " have stats. ndvProduct changed to: " + ndvProduct); + } + } + if (mapSide) { + // MAP SIDE - // since we do not know if hash-aggregation will be enabled or disabled - // at runtime we will assume that map-side group by does not do any - // reduction.hence no group by rule will be applied - - // map-side grouping set present. if grouping set is present then - // multiply the number of rows by number of elements in grouping set - if (gop.getConf().isGroupingSetsPresent()) { - newNumRows = setMaxIfInvalid(multiplier * stats.getNumRows()); - newDataSize = setMaxIfInvalid(multiplier * stats.getDataSize()); - stats.setNumRows(newNumRows); - stats.setDataSize(newDataSize); - for (ColStatistics cs : colStats) { - if (cs != null) { - long oldNumNulls = cs.getNumNulls(); - long newNumNulls = multiplier * oldNumNulls; - cs.setNumNulls(newNumNulls); + if (mapSideHashAgg) { + if (containsGroupingSet) { + // Case 4: column stats, hash aggregation, grouping sets + cardinality = Math.min((parentNumRows * sizeOfGroupingSet) / 2, + ndvProduct * parallelism * sizeOfGroupingSet); + + if (isDebugEnabled) { + LOG.debug("[Case 4] STATS-" + gop.toString() + ": cardinality: " + cardinality); + } + } else { + // Case 3: column stats, hash aggregation, NO grouping sets + cardinality = Math.min(parentNumRows / 2, ndvProduct * parallelism); + + if (isDebugEnabled) { + LOG.debug("[Case 3] STATS-" + gop.toString() + ": cardinality: " + cardinality); } } } else { + if (containsGroupingSet) { + // Case 6: column stats, NO hash aggregation, grouping sets + cardinality = parentNumRows * sizeOfGroupingSet; - // map side no grouping set - newNumRows = stats.getNumRows() * multiplier; - updateStats(stats, newNumRows, true, gop); + if (isDebugEnabled) { + LOG.debug("[Case 6] STATS-" + gop.toString() + ": cardinality: " + cardinality); + } + } else { + // Case 5: column stats, NO hash aggregation, NO grouping sets + cardinality = parentNumRows; + + if (isDebugEnabled) { + LOG.debug("[Case 5] STATS-" + gop.toString() + ": cardinality: " + cardinality); + } + } } } else { + // REDUCE SIDE + + // in reduce side GBY, we don't know if the grouping set was present or not. 
so get it + // from map side GBY + GroupByOperator mGop = OperatorUtils.findSingleOperatorUpstream(parent, GroupByOperator.class); + if (mGop != null) { + containsGroupingSet = mGop.getConf().isGroupingSetsPresent(); + sizeOfGroupingSet = mGop.getConf().getListGroupingSets().size(); + } + + if (containsGroupingSet) { + // Case 8: column stats, grouping sets + cardinality = Math.min(parentNumRows, ndvProduct * sizeOfGroupingSet); + + if (isDebugEnabled) { + LOG.debug("[Case 8] STATS-" + gop.toString() + ": cardinality: " + cardinality); + } + } else { + // Case 9: column stats, NO grouping sets + cardinality = Math.min(parentNumRows, ndvProduct); - // reduce side - newNumRows = applyGBYRule(stats.getNumRows(), dvProd); - updateStats(stats, newNumRows, true, gop); + if (isDebugEnabled) { + LOG.debug("[Case 9] STATS-" + gop.toString() + ": cardinality: " + cardinality); + } + } } + + // update stats, but don't update NDV as it will not change + updateStats(stats, cardinality, true, gop, false); } else { + + // NO COLUMN STATS if (parentStats != null) { stats = parentStats.clone(); + final long parentNumRows = stats.getNumRows(); - // worst case, in the absence of column statistics assume half the rows are emitted + // if we don't have column stats, we just assume hash aggregation is disabled if (mapSide) { + // MAP SIDE + + if (containsGroupingSet) { + // Case 2: NO column stats, NO hash aggregation, grouping sets + cardinality = parentNumRows * sizeOfGroupingSet; + + if (isDebugEnabled) { + LOG.debug("[Case 2] STATS-" + gop.toString() + ": cardinality: " + cardinality); + } + } else { + // Case 1: NO column stats, NO hash aggregation, NO grouping sets + cardinality = parentNumRows; - // map side - newNumRows = multiplier * stats.getNumRows(); - newDataSize = multiplier * stats.getDataSize(); - stats.setNumRows(newNumRows); - stats.setDataSize(newDataSize); + if (isDebugEnabled) { + LOG.debug("[Case 1] STATS-" + gop.toString() + ": cardinality: " + cardinality); + } + } } else { + // REDUCE SIDE + + // Case 7: NO column stats + cardinality = parentNumRows / 2; - // reduce side - newNumRows = parentStats.getNumRows() / 2; - updateStats(stats, newNumRows, false, gop); + if (isDebugEnabled) { + LOG.debug("[Case 7] STATS-" + gop.toString() + ": cardinality: " + cardinality); + } } + + updateStats(stats, cardinality, false, gop); } } @@ -738,7 +862,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, gop.setStatistics(stats); - if (LOG.isDebugEnabled() && stats != null) { + if (isDebugEnabled && stats != null) { LOG.debug("[0] STATS-" + gop.toString() + ": " + stats.extendedToString()); } } catch (CloneNotSupportedException e) { @@ -747,6 +871,107 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return null; } + /** + * This method does not take into account many configs used at runtime to + * disable hash aggregation like HIVEMAPAGGRHASHMINREDUCTION. This method + * roughly estimates the number of rows and size of each row to see if it + * can fit in hashtable for aggregation. 
+   * @param gop - group by operator
+   * @param colStats - column stats for key columns
+   * @param conf - hive conf
+   * @return true if the estimated hash table is expected to fit in the memory
+   *         available for map-side hash aggregation, false otherwise
+   */
+  private boolean checkMapSideAggregation(GroupByOperator gop,
+      List<ColStatistics> colStats, HiveConf conf) {
+
+    List<AggregationDesc> aggDesc = gop.getConf().getAggregators();
+    GroupByDesc desc = gop.getConf();
+    GroupByDesc.Mode mode = desc.getMode();
+
+    if (mode.equals(GroupByDesc.Mode.HASH)) {
+      float hashAggMem = conf.getFloatVar(
+          HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY);
+      float hashAggMaxThreshold = conf.getFloatVar(
+          HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD);
+
+      // get memory for the container. Maybe use mapreduce.map.java.opts instead?
+      long totalMemory =
+          DagUtils.getContainerResource(conf).getMemory() * 1000L * 1000L;
+      long maxMemHashAgg = Math
+          .round(totalMemory * hashAggMem * hashAggMaxThreshold);
+
+      // estimated number of rows will be the product of NDVs
+      long numEstimatedRows = 1;
+
+      // estimate size of key from column statistics
+      long avgKeySize = 0;
+      for (ColStatistics cs : colStats) {
+        if (cs != null) {
+          numEstimatedRows *= cs.getCountDistint();
+          avgKeySize += Math.ceil(cs.getAvgColLen());
+        }
+      }
+
+      // average value size will be the sum of all sizes of aggregation buffers
+      long avgValSize = 0;
+      // go over all aggregation buffers and see if they implement the estimable
+      // interface; if so, aggregate the size of the aggregation buffer
+      GenericUDAFEvaluator[] aggregationEvaluators;
+      aggregationEvaluators = new GenericUDAFEvaluator[aggDesc.size()];
+
+      // get aggregation evaluators
+      for (int i = 0; i < aggregationEvaluators.length; i++) {
+        AggregationDesc agg = aggDesc.get(i);
+        aggregationEvaluators[i] = agg.getGenericUDAFEvaluator();
+      }
+
+      // estimate size of aggregation buffer
+      for (int i = 0; i < aggregationEvaluators.length; i++) {
+
+        // each evaluator has constant java object overhead
+        avgValSize += gop.javaObjectOverHead;
+        GenericUDAFEvaluator.AggregationBuffer agg = null;
+        try {
+          agg = aggregationEvaluators[i].getNewAggregationBuffer();
+        } catch (HiveException e) {
+          // in case of exception assume unknown type (256 bytes)
+          avgValSize += gop.javaSizeUnknownType;
+        }
+
+        // aggregate size from aggregation buffers
+        if (agg != null) {
+          if (GenericUDAFEvaluator.isEstimable(agg)) {
+            avgValSize += ((GenericUDAFEvaluator.AbstractAggregationBuffer) agg)
+                .estimate();
+          } else {
+            // if the aggregation buffer is not estimable then get all the
+            // declared fields and compute the sizes from field types
+            Field[] fArr = ObjectInspectorUtils
+                .getDeclaredNonStaticFields(agg.getClass());
+            for (Field f : fArr) {
+              long avgSize = StatsUtils
+                  .getAvgColLenOfFixedLengthTypes(f.getType().getName());
+              avgValSize += avgSize == 0 ?
gop.javaSizeUnknownType : avgSize; + } + } + } + } + + // total size of each hash entry + long hashEntrySize = gop.javaHashEntryOverHead + avgKeySize + avgValSize; + + // estimated hash table size + long estHashTableSize = numEstimatedRows * hashEntrySize; + + if (estHashTableSize < maxMemHashAgg) { + return true; + } + } + + // worst-case, hash aggregation disabled + return false; + } + private long applyGBYRule(long numRows, long dvProd) { long newNumRows = numRows; @@ -967,7 +1192,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, outInTabAlias); jop.setStatistics(stats); - if (LOG.isDebugEnabled()) { + if (isDebugEnabled) { LOG.debug("[0] STATS-" + jop.toString() + ": " + stats.extendedToString()); } } else { @@ -1001,7 +1226,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, wcStats.setDataSize(setMaxIfInvalid(newDataSize)); jop.setStatistics(wcStats); - if (LOG.isDebugEnabled()) { + if (isDebugEnabled) { LOG.debug("[1] STATS-" + jop.toString() + ": " + wcStats.extendedToString()); } } @@ -1195,7 +1420,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } lop.setStatistics(stats); - if (LOG.isDebugEnabled()) { + if (isDebugEnabled) { LOG.debug("[0] STATS-" + lop.toString() + ": " + stats.extendedToString()); } } else { @@ -1213,7 +1438,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } lop.setStatistics(wcStats); - if (LOG.isDebugEnabled()) { + if (isDebugEnabled) { LOG.debug("[1] STATS-" + lop.toString() + ": " + wcStats.extendedToString()); } } @@ -1281,7 +1506,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, outStats.setColumnStats(colStats); } rop.setStatistics(outStats); - if (LOG.isDebugEnabled()) { + if (isDebugEnabled) { LOG.debug("[0] STATS-" + rop.toString() + ": " + outStats.extendedToString()); } } catch (CloneNotSupportedException e) { @@ -1322,7 +1547,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, stats.addToColumnStats(parentStats.getColumnStats()); op.getConf().setStatistics(stats); - if (LOG.isDebugEnabled()) { + if (isDebugEnabled) { LOG.debug("[0] STATS-" + op.toString() + ": " + stats.extendedToString()); } } @@ -1378,6 +1603,7 @@ public static NodeProcessor getDefaultRule() { return new DefaultStatsRule(); } + /** * Update the basic statistics of the statistics object based on the row number * @param stats @@ -1389,6 +1615,12 @@ public static NodeProcessor getDefaultRule() { */ static void updateStats(Statistics stats, long newNumRows, boolean useColStats, Operator op) { + updateStats(stats, newNumRows, useColStats, op, true); + } + + static void updateStats(Statistics stats, long newNumRows, + boolean useColStats, Operator op, + boolean updateNDV) { if (newNumRows <= 0) { LOG.info("STATS-" + op.toString() + ": Overflow in number of rows." @@ -1406,17 +1638,19 @@ static void updateStats(Statistics stats, long newNumRows, long oldNumNulls = cs.getNumNulls(); long oldDV = cs.getCountDistint(); long newNumNulls = Math.round(ratio * oldNumNulls); - long newDV = oldDV; - - // if ratio is greater than 1, then number of rows increases. This can happen - // when some operators like GROUPBY duplicates the input rows in which case - // number of distincts should not change. Update the distinct count only when - // the output number of rows is less than input number of rows. 
- if (ratio <= 1.0) { - newDV = (long) Math.ceil(ratio * oldDV); - } cs.setNumNulls(newNumNulls); - cs.setCountDistint(newDV); + if (updateNDV) { + long newDV = oldDV; + + // if ratio is greater than 1, then number of rows increases. This can happen + // when some operators like GROUPBY duplicates the input rows in which case + // number of distincts should not change. Update the distinct count only when + // the output number of rows is less than input number of rows. + if (ratio <= 1.0) { + newDV = (long) Math.ceil(ratio * oldDV); + } + cs.setCountDistint(newDV); + } } stats.setColumnStats(colStats); long newDataSize = StatsUtils.getDataSizeFromColumnStats(newNumRows, colStats); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index 4ff9678..37cbf7f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -207,7 +207,7 @@ protected static Hive createHiveDB(HiveConf conf) throws SemanticException { } public abstract void analyzeInternal(ASTNode ast) throws SemanticException; - public void init() { + public void init(boolean clearPartsCache) { //no-op } @@ -217,7 +217,7 @@ public void initCtx(Context ctx) { public void analyze(ASTNode ast, Context ctx) throws SemanticException { initCtx(ctx); - init(); + init(true); analyzeInternal(ast); } @@ -244,7 +244,7 @@ public void setFetchTask(FetchTask fetchTask) { this.fetchTask = fetchTask; } - protected void reset() { + protected void reset(boolean clearPartsCache) { rootTasks = new ArrayList>(); } @@ -406,7 +406,6 @@ public static void readProps( @SuppressWarnings("nls") public static String unescapeSQLString(String b) { - Character enclosure = null; // Some of the strings can be passed in as unicode. For example, the @@ -487,7 +486,7 @@ public static String unescapeSQLString(String b) { case '\\': sb.append("\\"); break; - // The following 2 lines are exactly what MySQL does + // The following 2 lines are exactly what MySQL does TODO: why do we do this? case '%': sb.append("\\%"); break; @@ -505,6 +504,58 @@ public static String unescapeSQLString(String b) { return sb.toString(); } + /** + * Escapes the string for AST; doesn't enclose it in quotes, however. + */ + public static String escapeSQLString(String b) { + // There's usually nothing to escape so we will be optimistic. + String result = b; + for (int i = 0; i < result.length(); ++i) { + char currentChar = result.charAt(i); + if (currentChar == '\\' && ((i + 1) < result.length())) { + // TODO: do we need to handle the "this is what MySQL does" here? 
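+      // unescapeSQLString() maps '%' and '_' to "\%" and "\_" (the MySQL-style
+      // escapes), so those two-character sequences are deliberately left
+      // untouched here.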
+ char nextChar = result.charAt(i + 1); + if (nextChar == '%' || nextChar == '_') { + ++i; + continue; + } + } + switch (currentChar) { + case '\0': result = spliceString(result, i, "\\0"); ++i; break; + case '\'': result = spliceString(result, i, "\\'"); ++i; break; + case '\"': result = spliceString(result, i, "\\\""); ++i; break; + case '\b': result = spliceString(result, i, "\\b"); ++i; break; + case '\n': result = spliceString(result, i, "\\n"); ++i; break; + case '\r': result = spliceString(result, i, "\\r"); ++i; break; + case '\t': result = spliceString(result, i, "\\t"); ++i; break; + case '\\': result = spliceString(result, i, "\\\\"); ++i; break; + case '\u001A': result = spliceString(result, i, "\\Z"); ++i; break; + default: { + if (currentChar < ' ') { + String hex = Integer.toHexString(currentChar); + String unicode = "\\u"; + for (int j = 4; j > hex.length(); --j) { + unicode += '0'; + } + unicode += hex; + result = spliceString(result, i, unicode); + i += (unicode.length() - 1); + } + break; // if not a control character, do nothing + } + } + } + return result; + } + + private static String spliceString(String str, int i, String replacement) { + return spliceString(str, i, 1, replacement); + } + + private static String spliceString(String str, int i, int length, String replacement) { + return str.substring(0, i) + replacement + str.substring(i + length); + } + public HashSet getInputs() { return inputs; } @@ -1234,7 +1285,7 @@ protected Database getDatabase(String dbName, boolean throwException) throws Sem try { database = db.getDatabase(dbName); } catch (Exception e) { - throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dbName), e); + throw new SemanticException(e.getMessage(), e); } if (database == null && throwException) { throw new SemanticException(ErrorMsg.DATABASE_NOT_EXISTS.getMsg(dbName)); @@ -1264,9 +1315,13 @@ protected Table getTable(String database, String tblName, boolean throwException try { tab = database == null ? 
db.getTable(tblName, false) : db.getTable(database, tblName, false); - } catch (Exception e) { + } + catch (InvalidTableException e) { throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName), e); } + catch (Exception e) { + throw new SemanticException(e.getMessage(), e); + } if (tab == null && throwException) { throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tblName)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java index 9fc1aa0..73348d8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.InvalidTableException; import org.apache.hadoop.hive.ql.metadata.Table; /** @@ -58,7 +59,7 @@ private Table tbl; public ColumnStatsSemanticAnalyzer(HiveConf conf) throws SemanticException { - super(conf); + super(conf, false); } private boolean shouldRewrite(ASTNode tree) { @@ -95,8 +96,10 @@ private Table getTable(ASTNode tree) throws SemanticException { String tableName = getUnescapedName((ASTNode) tree.getChild(0).getChild(0)); try { return db.getTable(tableName); + } catch (InvalidTableException e) { + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e); } catch (HiveException e) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName)); + throw new SemanticException(e.getMessage(), e); } } @@ -377,7 +380,7 @@ public void analyze(ASTNode ast, Context origCtx) throws SemanticException { QBParseInfo qbp; // initialize QB - init(); + init(true); // check if it is no scan. 
grammar prevents coexit noscan/columns super.processNoScanCommand(ast); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 8c2564f..4e58ad8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -79,6 +79,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveUtils; +import org.apache.hadoop.hive.ql.metadata.InvalidTableException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.authorization.AuthorizationParseUtils; @@ -267,11 +268,11 @@ public void analyzeInternal(ASTNode input) throws SemanticException { } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_UNARCHIVE) { analyzeAlterTableArchive(qualified, ast, true); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDCOLS) { - analyzeAlterTableModifyCols(qualified, ast, AlterTableTypes.ADDCOLS); + analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.ADDCOLS); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_REPLACECOLS) { - analyzeAlterTableModifyCols(qualified, ast, AlterTableTypes.REPLACECOLS); + analyzeAlterTableModifyCols(qualified, ast, partSpec, AlterTableTypes.REPLACECOLS); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_RENAMECOL) { - analyzeAlterTableRenameCol(qualified, ast); + analyzeAlterTableRenameCol(qualified, ast, partSpec); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_ADDPARTS) { analyzeAlterTableAddParts(qualified, ast, false); } else if (ast.getType() == HiveParser.TOK_ALTERTABLE_DROPPARTS) { @@ -847,7 +848,8 @@ private void analyzeDropTable(ASTNode ast, boolean expectView) outputs.add(new WriteEntity(tab, WriteEntity.WriteType.DDL_EXCLUSIVE)); } - DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists); + boolean ifPurge = (ast.getFirstChildWithType(HiveParser.KW_PURGE) != null); + DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView, ifExists, ifPurge); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf)); } @@ -1717,7 +1719,8 @@ static public String getFullyQualifiedName(ASTNode ast) { // assume the first component of DOT delimited name is tableName // get the attemptTableName - static public String getAttemptTableName(Hive db, String qualifiedName, boolean isColumn) { + static public String getAttemptTableName(Hive db, String qualifiedName, boolean isColumn) + throws SemanticException { // check whether the name starts with table // DESCRIBE table // DESCRIBE table.column @@ -1738,11 +1741,13 @@ static public String getAttemptTableName(Hive db, String qualifiedName, boolean return tableName; } } - } catch (HiveException e) { + } catch (InvalidTableException e) { // assume the first DOT delimited component is tableName // OK if it is not // do nothing when having exception return null; + } catch (HiveException e) { + throw new SemanticException(e.getMessage(), e); } return null; } @@ -1823,7 +1828,7 @@ static public String getColPath( ASTNode parentAst, ASTNode ast, String tableName, - Map partSpec) { + Map partSpec) throws SemanticException { // if parent has two children // it could be DESCRIBE table key @@ -1879,11 +1884,13 @@ static public String getColPath( Table tab = null; try { tab = 
db.getTable(tableName); - } catch (HiveException e) { - // if table not valid - // throw semantic exception + } + catch (InvalidTableException e) { throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e); } + catch (HiveException e) { + throw new SemanticException(e.getMessage(), e); + } if (partSpec != null) { Partition part = null; @@ -2480,7 +2487,8 @@ private void analyzeAlterTableRename(String[] source, ASTNode ast, boolean expec alterTblDesc), conf)); } - private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast) throws SemanticException { + private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast, + HashMap partSpec) throws SemanticException { String newComment = null; String newType = null; newType = getTypeStringFromAST((ASTNode) ast.getChild(2)); @@ -2521,10 +2529,10 @@ private void analyzeAlterTableRenameCol(String[] qualified, ASTNode ast) throws } String tblName = getDotName(qualified); - AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, + AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, partSpec, unescapeIdentifier(oldColName), unescapeIdentifier(newColName), newType, newComment, first, flagCol); - addInputsOutputsAlterTable(tblName, null, alterTblDesc); + addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); @@ -2568,14 +2576,14 @@ private void analyzeAlterTableBucketNum(ASTNode ast, String tblName, } private void analyzeAlterTableModifyCols(String[] qualified, ASTNode ast, - AlterTableTypes alterType) throws SemanticException { + HashMap partSpec, AlterTableTypes alterType) throws SemanticException { String tblName = getDotName(qualified); List newCols = getColumns((ASTNode) ast.getChild(0)); - AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols, + AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, partSpec, newCols, alterType); - addInputsOutputsAlterTable(tblName, null, alterTblDesc); + addInputsOutputsAlterTable(tblName, partSpec, alterTblDesc); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g index 9463ef1..b72ee5d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g @@ -263,7 +263,7 @@ searchCondition // INSERT INTO (col1,col2,...) SELECT * FROM (VALUES(1,2,3),(4,5,6),...) 
as Foo(a,b,c) valueRowConstructor : - LPAREN atomExpression (COMMA atomExpression)* RPAREN -> ^(TOK_VALUE_ROW atomExpression+) + LPAREN precedenceUnaryPrefixExpression (COMMA precedenceUnaryPrefixExpression)* RPAREN -> ^(TOK_VALUE_ROW precedenceUnaryPrefixExpression+) ; valuesTableConstructor diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java index f9b875e..22e5b47 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java @@ -22,6 +22,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.Database; @@ -81,7 +83,7 @@ private void analyzeCreateFunction(ASTNode ast) throws SemanticException { new CreateFunctionDesc(functionName, isTemporaryFunction, className, resources); rootTasks.add(TaskFactory.get(new FunctionWork(desc), conf)); - addEntities(functionName, isTemporaryFunction); + addEntities(functionName, isTemporaryFunction, resources); } private void analyzeDropFunction(ASTNode ast) throws SemanticException { @@ -106,7 +108,7 @@ private void analyzeDropFunction(ASTNode ast) throws SemanticException { DropFunctionDesc desc = new DropFunctionDesc(functionName, isTemporaryFunction); rootTasks.add(TaskFactory.get(new FunctionWork(desc), conf)); - addEntities(functionName, isTemporaryFunction); + addEntities(functionName, isTemporaryFunction, null); } private ResourceType getResourceType(ASTNode token) throws SemanticException { @@ -152,8 +154,8 @@ private ResourceType getResourceType(ASTNode token) throws SemanticException { /** * Add write entities to the semantic analyzer to restrict function creation to privileged users. */ - private void addEntities(String functionName, boolean isTemporaryFunction) - throws SemanticException { + private void addEntities(String functionName, boolean isTemporaryFunction, + List resources) throws SemanticException { // If the function is being added under a database 'namespace', then add an entity representing // the database (only applicable to permanent/metastore functions). // We also add a second entity representing the function name. 
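Note on the hunk below: threading the resources list into addEntities() lets a
CREATE FUNCTION ... USING JAR/FILE/ARCHIVE statement register each resource URI
as a write entity, presumably so that authorization also covers the resource
paths. A minimal sketch of the intended effect (the function name and jar URI
are made up for illustration):

    // CREATE FUNCTION mydb.myfunc AS 'com.example.MyUDF'
    //   USING JAR 'hdfs:///libs/myudf.jar';
    // besides the database and function write entities, outputs now also
    // receives one entry per resource:
    outputs.add(new WriteEntity(new Path("hdfs:///libs/myudf.jar"),
        FileUtils.isLocalFile(conf, "hdfs:///libs/myudf.jar"))); // false for HDFS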
@@ -183,5 +185,13 @@ private void addEntities(String functionName, boolean isTemporaryFunction) // Add the function name as a WriteEntity outputs.add(new WriteEntity(database, functionName, Type.FUNCTION, WriteEntity.WriteType.DDL_NO_LOCK)); + + if (resources != null) { + for (ResourceUri resource : resources) { + String uriPath = resource.getUri(); + outputs.add(new WriteEntity(new Path(uriPath), + FileUtils.isLocalFile(conf, uriPath))); + } + } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java index e34ce28..90616ad 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator; +import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator; import org.apache.hadoop.hive.ql.exec.DependencyCollectionTask; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; @@ -45,6 +46,7 @@ import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.MergeJoinWork; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.TezEdgeProperty; @@ -132,6 +134,8 @@ // remember which reducesinks we've already connected public final Set connectedReduceSinks; + public final Map, MergeJoinWork> opMergeJoinWorkMap; + public CommonMergeJoinOperator currentMergeJoinOperator; // remember the event operators we've seen public final Set eventOperatorSet; @@ -176,6 +180,8 @@ public GenTezProcContext(HiveConf conf, ParseContext parseContext, this.eventOperatorSet = new LinkedHashSet(); this.abandonedEventOperatorSet = new LinkedHashSet(); this.tsToEventMap = new LinkedHashMap>(); + this.opMergeJoinWorkMap = new LinkedHashMap, MergeJoinWork>(); + this.currentMergeJoinOperator = null; rootTasks.add(currentTask); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java index f061516..f2723ec 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java @@ -167,7 +167,8 @@ protected void setupReduceSink(GenTezProcContext context, ReduceWork reduceWork, GenMapRedUtils.setKeyAndValueDesc(reduceWork, reduceSink); // remember which parent belongs to which tag - reduceWork.getTagToInput().put(reduceSink.getConf().getTag(), + int tag = reduceSink.getConf().getTag(); + reduceWork.getTagToInput().put(tag == -1 ? 
0 : tag,
         context.preceedingWork.getName());
 
     // remember the output name of the reduce sink
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java
index b304fd3..516e576 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java
@@ -28,6 +28,8 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator;
+import org.apache.hadoop.hive.ql.exec.DummyStoreOperator;
 import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator;
 import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
@@ -38,11 +40,14 @@
 import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
 import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
 import org.apache.hadoop.hive.ql.plan.BaseWork;
+import org.apache.hadoop.hive.ql.plan.MergeJoinWork;
+import org.apache.hadoop.hive.ql.plan.OperatorDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
 import org.apache.hadoop.hive.ql.plan.ReduceWork;
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty;
 import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType;
 import org.apache.hadoop.hive.ql.plan.TezWork;
+import org.apache.hadoop.hive.ql.plan.TezWork.VertexType;
 import org.apache.hadoop.hive.ql.plan.UnionWork;
 
 /**
@@ -126,6 +131,48 @@ public Object process(Node nd, Stack stack,
       context.childToWorkMap.get(operator).add(work);
     }
 
+    // this transformation needs to be first because it changes the work item itself,
+    // which can affect the working of all downstream transformations.
+    if (context.currentMergeJoinOperator != null) {
+      // we are currently walking the big table side of the merge join. We need
+      // to create or hook up merge join work.
+      MergeJoinWork mergeJoinWork = null;
+      if (context.opMergeJoinWorkMap.containsKey(operator)) {
+        // we have found a merge work corresponding to this closing operator. Hook up this work.
+        mergeJoinWork = context.opMergeJoinWorkMap.get(operator);
+      } else {
+        // we need to create the merge join work
+        mergeJoinWork = new MergeJoinWork();
+        mergeJoinWork.setMergeJoinOperator(context.currentMergeJoinOperator);
+        tezWork.add(mergeJoinWork);
+        context.opMergeJoinWorkMap.put(operator, mergeJoinWork);
+      }
+      // connect the work correctly.
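+      // addMergedWork(work, null) folds the big-table work into the merge join
+      // work; every edge into or out of 'work' is then rerouted through
+      // mergeJoinWork, and 'work' itself is dropped from the TezWork graph.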
+ mergeJoinWork.addMergedWork(work, null); + Operator parentOp = + getParentFromStack(context.currentMergeJoinOperator, stack); + int pos = context.currentMergeJoinOperator.getTagForOperator(parentOp); + work.setTag(pos); + tezWork.setVertexType(work, VertexType.MULTI_INPUT_UNINITIALIZED_EDGES); + for (BaseWork parentWork : tezWork.getParents(work)) { + TezEdgeProperty edgeProp = tezWork.getEdgeProperty(parentWork, work); + tezWork.disconnect(parentWork, work); + tezWork.connect(parentWork, mergeJoinWork, edgeProp); + } + + for (BaseWork childWork : tezWork.getChildren(work)) { + TezEdgeProperty edgeProp = tezWork.getEdgeProperty(work, childWork); + tezWork.disconnect(work, childWork); + tezWork.connect(mergeJoinWork, childWork, edgeProp); + } + tezWork.remove(work); + context.rootToWorkMap.put(root, mergeJoinWork); + context.childToWorkMap.get(operator).remove(work); + context.childToWorkMap.get(operator).add(mergeJoinWork); + work = mergeJoinWork; + context.currentMergeJoinOperator = null; + } + // remember which mapjoin operator links with which work if (!context.currentMapJoinOperators.isEmpty()) { for (MapJoinOperator mj: context.currentMapJoinOperators) { @@ -169,6 +216,9 @@ public Object process(Node nd, Stack stack, LOG.debug("connecting "+parentWork.getName()+" with "+work.getName()); TezEdgeProperty edgeProp = parentWorkMap.getValue(); tezWork.connect(parentWork, work, edgeProp); + if (edgeProp.getEdgeType() == EdgeType.CUSTOM_EDGE) { + tezWork.setVertexType(work, VertexType.INITIALIZED_EDGES); + } // need to set up output name for reduce sink now that we know the name // of the downstream work @@ -192,14 +242,6 @@ public Object process(Node nd, Stack stack, context.currentMapJoinOperators.clear(); } - // This is where we cut the tree as described above. We also remember that - // we might have to connect parent work with this work later. - for (Operator parent: new ArrayList>(root.getParentOperators())) { - context.leafOperatorToFollowingWork.put(parent, work); - LOG.debug("Removing " + parent + " as parent from " + root); - root.removeParent(parent); - } - if (!context.currentUnionOperators.isEmpty()) { // if there are union all operators we need to add the work to the set // of union operators. @@ -229,6 +271,21 @@ public Object process(Node nd, Stack stack, work = unionWork; } + + // This is where we cut the tree as described above. We also remember that + // we might have to connect parent work with this work later. + boolean removeParents = false; + for (Operator parent: new ArrayList>(root.getParentOperators())) { + removeParents = true; + context.leafOperatorToFollowingWork.put(parent, work); + LOG.debug("Removing " + parent + " as parent from " + root); + } + if (removeParents) { + for (Operator parent : new ArrayList>(root.getParentOperators())) { + root.removeParent(parent); + } + } + // We're scanning a tree from roots to leaf (this is not technically // correct, demux and mux operators might form a diamond shape, but // we will only scan one path and ignore the others, because the @@ -248,31 +305,64 @@ public Object process(Node nd, Stack stack, LOG.debug("Second pass. 
Leaf operator: "+operator +" has common downstream work:"+followingWork); - // need to add this branch to the key + value info - assert operator instanceof ReduceSinkOperator - && followingWork instanceof ReduceWork; - ReduceSinkOperator rs = (ReduceSinkOperator) operator; - ReduceWork rWork = (ReduceWork) followingWork; - GenMapRedUtils.setKeyAndValueDesc(rWork, rs); - - // remember which parent belongs to which tag - rWork.getTagToInput().put(rs.getConf().getTag(), work.getName()); - - // remember the output name of the reduce sink - rs.getConf().setOutputName(rWork.getName()); - - if (!context.connectedReduceSinks.contains(rs)) { - // add dependency between the two work items - TezEdgeProperty edgeProp; - if (rWork.isAutoReduceParallelism()) { - edgeProp = - new TezEdgeProperty(context.conf, EdgeType.SIMPLE_EDGE, true, - rWork.getMinReduceTasks(), rWork.getMaxReduceTasks(), bytesPerReducer); + if (operator instanceof DummyStoreOperator) { + // this is the small table side. + assert (followingWork instanceof MergeJoinWork); + MergeJoinWork mergeJoinWork = (MergeJoinWork) followingWork; + CommonMergeJoinOperator mergeJoinOp = mergeJoinWork.getMergeJoinOperator(); + work.setTag(mergeJoinOp.getTagForOperator(operator)); + mergeJoinWork.addMergedWork(null, work); + tezWork.setVertexType(mergeJoinWork, VertexType.MULTI_INPUT_UNINITIALIZED_EDGES); + for (BaseWork parentWork : tezWork.getParents(work)) { + TezEdgeProperty edgeProp = tezWork.getEdgeProperty(parentWork, work); + tezWork.disconnect(parentWork, work); + tezWork.connect(parentWork, mergeJoinWork, edgeProp); + } + work = mergeJoinWork; + } else { + // need to add this branch to the key + value info + assert operator instanceof ReduceSinkOperator + && ((followingWork instanceof ReduceWork) || (followingWork instanceof MergeJoinWork) + || followingWork instanceof UnionWork); + ReduceSinkOperator rs = (ReduceSinkOperator) operator; + ReduceWork rWork = null; + if (followingWork instanceof MergeJoinWork) { + MergeJoinWork mergeJoinWork = (MergeJoinWork) followingWork; + rWork = (ReduceWork) mergeJoinWork.getMainWork(); + } else if (followingWork instanceof UnionWork) { + // this can only be possible if there is merge work followed by the union + UnionWork unionWork = (UnionWork) followingWork; + int index = getMergeIndex(tezWork, unionWork, rs); + // guaranteed to be instance of MergeJoinWork if index is valid + MergeJoinWork mergeJoinWork = (MergeJoinWork) tezWork.getChildren(unionWork).get(index); + // disconnect the connection to union work and connect to merge work + followingWork = mergeJoinWork; + rWork = (ReduceWork) mergeJoinWork.getMainWork(); } else { - edgeProp = new TezEdgeProperty(EdgeType.SIMPLE_EDGE); + rWork = (ReduceWork) followingWork; + } + GenMapRedUtils.setKeyAndValueDesc(rWork, rs); + + // remember which parent belongs to which tag + int tag = rs.getConf().getTag(); + rWork.getTagToInput().put(tag == -1 ? 
0 : tag, work.getName()); + + // remember the output name of the reduce sink + rs.getConf().setOutputName(rWork.getName()); + + if (!context.connectedReduceSinks.contains(rs)) { + // add dependency between the two work items + TezEdgeProperty edgeProp; + if (rWork.isAutoReduceParallelism()) { + edgeProp = + new TezEdgeProperty(context.conf, EdgeType.SIMPLE_EDGE, true, + rWork.getMinReduceTasks(), rWork.getMaxReduceTasks(), bytesPerReducer); + } else { + edgeProp = new TezEdgeProperty(EdgeType.SIMPLE_EDGE); + } + tezWork.connect(work, followingWork, edgeProp); + context.connectedReduceSinks.add(rs); } - tezWork.connect(work, rWork, edgeProp); - context.connectedReduceSinks.add(rs); } } else { LOG.debug("First pass. Leaf operator: "+operator); @@ -289,4 +379,28 @@ public Object process(Node nd, Stack stack, return null; } + + private int getMergeIndex(TezWork tezWork, UnionWork unionWork, ReduceSinkOperator rs) { + int index = 0; + for (BaseWork baseWork : tezWork.getChildren(unionWork)) { + if (baseWork instanceof MergeJoinWork) { + MergeJoinWork mergeJoinWork = (MergeJoinWork) baseWork; + int tag = mergeJoinWork.getMergeJoinOperator().getTagForOperator(rs); + if (tag != -1) { + return index; + } else { + index++; + } + } + } + + return -1; + } + + @SuppressWarnings("unchecked") + private Operator getParentFromStack(Node currentMergeJoinOperator, + Stack stack) { + int pos = stack.indexOf(currentMergeJoinOperator); + return (Operator) stack.get(pos - 1); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 714e7d1..ef6d6f7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -479,8 +479,9 @@ import java.util.HashMap; xlateMap.put("KW_SUBQUERY", "SUBQUERY"); xlateMap.put("KW_REWRITE", "REWRITE"); xlateMap.put("KW_UPDATE", "UPDATE"); - xlateMap.put("KW_VALUES", "VALUES"); + xlateMap.put("KW_PURGE", "PURGE"); + // Operators xlateMap.put("DOT", "."); @@ -929,7 +930,7 @@ dropIndexStatement dropTableStatement @init { pushMsg("drop statement", state); } @after { popMsg(state); } - : KW_DROP KW_TABLE ifExists? tableName -> ^(TOK_DROPTABLE tableName ifExists?) + : KW_DROP KW_TABLE ifExists? tableName KW_PURGE? -> ^(TOK_DROPTABLE tableName ifExists? KW_PURGE?) 
; alterStatement @@ -945,8 +946,6 @@ alterTableStatementSuffix @init { pushMsg("alter table statement", state); } @after { popMsg(state); } : alterStatementSuffixRename[true] - | alterStatementSuffixAddCol - | alterStatementSuffixRenameCol | alterStatementSuffixUpdateStatsCol | alterStatementSuffixDropPartitions[true] | alterStatementSuffixAddPartitions[true] @@ -974,6 +973,8 @@ alterTblPartitionStatementSuffix | alterStatementSuffixClusterbySortby | alterStatementSuffixCompact | alterStatementSuffixUpdateStatsCol + | alterStatementSuffixRenameCol + | alterStatementSuffixAddCol ; alterStatementPartitionKeyType @@ -2237,7 +2238,7 @@ deleteStatement /*SET = (3 + col2)*/ columnAssignmentClause : - tableOrColumn EQUAL^ atomExpression + tableOrColumn EQUAL^ precedencePlusExpression ; /*SET col1 = 5, col2 = (4 + col4), ...*/ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java index 52c39c0..a24cad9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java @@ -131,7 +131,7 @@ public String getErrorMessage(RecognitionException e, String[] tokenNames) { * so that the graph walking algorithms and the rules framework defined in * ql.lib can be used with the AST Nodes. */ - static final TreeAdaptor adaptor = new CommonTreeAdaptor() { + public static final TreeAdaptor adaptor = new CommonTreeAdaptor() { /** * Creates an ASTNode for the given token. The ASTNode is a wrapper around * antlr's CommonTree class that implements the Node interface. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java index 396553a..373429c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java @@ -111,7 +111,7 @@ private ParseUtils() { * @param tableFieldTypeInfo TypeInfo to convert to * @return Expression converting column to the type specified by tableFieldTypeInfo */ - static ExprNodeDesc createConversionCast(ExprNodeDesc column, PrimitiveTypeInfo tableFieldTypeInfo) + public static ExprNodeDesc createConversionCast(ExprNodeDesc column, PrimitiveTypeInfo tableFieldTypeInfo) throws SemanticException { // Get base type, since type string may be parameterized String baseType = TypeInfoUtils.getBaseName(tableFieldTypeInfo.getTypeName()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBMetaData.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBMetaData.java index 66faf1d..0c973ce 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QBMetaData.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QBMetaData.java @@ -104,10 +104,18 @@ public Table getDestTableForAlias(String alias) { return nameToDestTable.get(alias.toLowerCase()); } + public Map getNameToDestTable() { + return nameToDestTable; + } + public Partition getDestPartitionForAlias(String alias) { return nameToDestPartition.get(alias.toLowerCase()); } + public Map getNameToDestPartition() { + return nameToDestPartition; + } + public String getDestFileForAlias(String alias) { return nameToDestFile.get(alias.toLowerCase()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java index 2e58b80..9c55379 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/RowResolver.java @@ 
-49,7 +49,7 @@
  * The primary(first) mapping is still only held in
  * invRslvMap.
  */
-  private Map<String, String[]> altInvRslvMap;
+  private final Map<String, String[]> altInvRslvMap;
   private Map<String, ASTNode> expressionMap;
 
   // TODO: Refactor this and do in a more object oriented manner
@@ -351,4 +351,73 @@
   public void setExpressionMap(Map<String, ASTNode> expressionMap) {
     this.expressionMap = expressionMap;
   }
+
+  // TODO: 1) How to handle collisions? 2) Should we be cloning ColumnInfo or
+  // not?
+  public static int add(RowResolver rrToAddTo, RowResolver rrToAddFrom,
+      int outputColPos, int numColumns) throws SemanticException {
+    String tabAlias;
+    String colAlias;
+    String[] qualifiedColName;
+    int i = 0;
+
+    for (ColumnInfo cInfoFrmInput : rrToAddFrom.getRowSchema().getSignature()) {
+      if ( numColumns >= 0 && i == numColumns ) {
+        break;
+      }
+      ColumnInfo newCI = null;
+      qualifiedColName = rrToAddFrom.getInvRslvMap().get(
+          cInfoFrmInput.getInternalName());
+      tabAlias = qualifiedColName[0];
+      colAlias = qualifiedColName[1];
+
+      newCI = new ColumnInfo(cInfoFrmInput);
+      newCI.setInternalName(SemanticAnalyzer
+          .getColumnInternalName(outputColPos));
+
+      outputColPos++;
+
+      if (rrToAddTo.get(tabAlias, colAlias) != null) {
+        LOG.debug("Found duplicate column alias in RR: " + rrToAddTo.get(tabAlias, colAlias));
+      } else {
+        rrToAddTo.put(tabAlias, colAlias, newCI);
+      }
+
+      qualifiedColName = rrToAddFrom.getAlternateMappings(cInfoFrmInput
+          .getInternalName());
+      if (qualifiedColName != null) {
+        tabAlias = qualifiedColName[0];
+        colAlias = qualifiedColName[1];
+        rrToAddTo.put(tabAlias, colAlias, newCI);
+      }
+      i++;
+    }
+
+    return outputColPos;
+  }
+
+  public static int add(RowResolver rrToAddTo, RowResolver rrToAddFrom,
+      int outputColPos) throws SemanticException {
+    return add(rrToAddTo, rrToAddFrom, outputColPos, -1);
+  }
+
+  /**
+   * Return a new row resolver that is a combination of the left RR and the right RR.
+   * The schema will be the schema of the left, followed by the schema of the right.
+   *
+   * @param leftRR
+   * @param rightRR
+   * @return
+   * @throws SemanticException
+   */
+  public static RowResolver getCombinedRR(RowResolver leftRR,
+      RowResolver rightRR) throws SemanticException {
+    int outputColPos = 0;
+
+    RowResolver combinedRR = new RowResolver();
+    outputColPos = add(combinedRR, leftRR, outputColPos);
+    outputColPos = add(combinedRR, rightRR, outputColPos);
+
+    return combinedRR;
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 41c75ef..19110ce 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -22,8 +22,10 @@
 import java.io.IOException;
 import java.io.Serializable;
+import java.math.BigDecimal;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.BitSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -34,12 +36,20 @@
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.regex.Pattern;
 import java.util.regex.PatternSyntaxException;
+import com.google.common.annotations.VisibleForTesting;
+import net.hydromatic.optiq.SchemaPlus;
+import net.hydromatic.optiq.tools.Frameworks;
+
 import org.antlr.runtime.ClassicToken;
 import org.antlr.runtime.Token;
 import org.antlr.runtime.tree.Tree;
+import org.antlr.runtime.tree.TreeVisitor;
+import org.antlr.runtime.tree.TreeVisitorAction;
 import org.antlr.runtime.tree.TreeWizard;
 import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
 import org.apache.commons.lang.StringUtils;
@@ -59,6 +69,7 @@
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.QueryProperties;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
@@ -102,11 +113,35 @@
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.HiveUtils;
+import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
 import org.apache.hadoop.hive.ql.optimizer.Optimizer;
 import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
+import org.apache.hadoop.hive.ql.optimizer.optiq.HiveDefaultRelMetadataProvider;
+import org.apache.hadoop.hive.ql.optimizer.optiq.HiveOptiqUtil;
+import org.apache.hadoop.hive.ql.optimizer.optiq.HiveTypeSystemImpl;
+import org.apache.hadoop.hive.ql.optimizer.optiq.OptiqSemanticException;
+import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable;
+import org.apache.hadoop.hive.ql.optimizer.optiq.TraitsUtil;
+import org.apache.hadoop.hive.ql.optimizer.optiq.cost.HiveVolcanoPlanner;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveAggregateRel;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveFilterRel;
+import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveJoinRel;
+import
org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveProjectRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveSortRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveUnionRel; +import org.apache.hadoop.hive.ql.optimizer.optiq.rules.HivePartitionPrunerRule; +import org.apache.hadoop.hive.ql.optimizer.optiq.rules.HivePushFilterPastJoinRule; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.ASTConverter; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.JoinCondTypeCheckProcFactory; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.JoinTypeCheckCtx; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.RexNodeConverter; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.SqlFunctionConverter; +import org.apache.hadoop.hive.ql.optimizer.optiq.translator.TypeConverter; import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.tableSpec.SpecType; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderExpression; import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.OrderSpec; @@ -192,12 +227,74 @@ import org.apache.hadoop.hive.serde2.objectinspector.StandardStructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.mapred.InputFormat; +import org.eigenbase.rel.AggregateCall; +import org.eigenbase.rel.AggregateRelBase; +import org.eigenbase.rel.Aggregation; +import org.eigenbase.rel.FilterRelBase; +import org.eigenbase.rel.InvalidRelException; +import org.eigenbase.rel.JoinRelBase; +import org.eigenbase.rel.JoinRelType; +import org.eigenbase.rel.RelCollation; +import org.eigenbase.rel.RelCollationImpl; +import org.eigenbase.rel.RelFactories; +import org.eigenbase.rel.RelFieldCollation; +import org.eigenbase.rel.RelNode; +import org.eigenbase.rel.metadata.CachingRelMetadataProvider; +import org.eigenbase.rel.metadata.ChainedRelMetadataProvider; +import org.eigenbase.rel.metadata.RelMetadataProvider; +import org.eigenbase.rel.rules.ConvertMultiJoinRule; +import org.eigenbase.rel.rules.FilterAggregateTransposeRule; +import org.eigenbase.rel.rules.LoptOptimizeJoinRule; +import org.eigenbase.rel.rules.MergeFilterRule; +import org.eigenbase.rel.rules.PushFilterPastProjectRule; +import org.eigenbase.rel.rules.PushFilterPastSetOpRule; +import org.eigenbase.rel.rules.SemiJoinRel; +import org.eigenbase.rel.rules.TransitivePredicatesOnJoinRule; +import org.eigenbase.relopt.RelOptCluster; +import org.eigenbase.relopt.RelOptPlanner; +import org.eigenbase.relopt.RelOptQuery; +import org.eigenbase.relopt.RelOptRule; +import org.eigenbase.relopt.RelOptSchema; +import org.eigenbase.relopt.RelOptUtil; +import org.eigenbase.relopt.RelTraitSet; +import org.eigenbase.relopt.hep.HepMatchOrder; +import org.eigenbase.relopt.hep.HepPlanner; +import org.eigenbase.relopt.hep.HepProgram; +import org.eigenbase.relopt.hep.HepProgramBuilder; +import org.eigenbase.reltype.RelDataType; +import 
org.eigenbase.reltype.RelDataTypeFactory; +import org.eigenbase.reltype.RelDataTypeField; +import org.eigenbase.rex.RexBuilder; +import org.eigenbase.rex.RexInputRef; +import org.eigenbase.rex.RexNode; +import org.eigenbase.rex.RexWindowBound; +import org.eigenbase.rex.RexFieldCollation; +import org.eigenbase.sql.SqlAggFunction; +import org.eigenbase.sql.SqlWindow; +import org.eigenbase.sql.parser.SqlParserPos; +import org.eigenbase.sql.type.SqlTypeName; +import org.eigenbase.sql2rel.RelFieldTrimmer; +import org.eigenbase.sql.SqlCall; +import org.eigenbase.sql.SqlExplainLevel; +import org.eigenbase.sql.SqlKind; +import org.eigenbase.sql.SqlNode; +import org.eigenbase.sql.SqlLiteral; +import org.eigenbase.util.CompositeList; +import org.eigenbase.util.ImmutableIntList; +import org.eigenbase.util.Pair; + +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableList.Builder; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; /** * Implementation of the semantic analyzer. It generates the query plan. @@ -214,6 +311,9 @@ private static final String VALUES_TMP_TABLE_NAME_PREFIX = "Values__Tmp__Table__"; + @VisibleForTesting + static final String ACID_TABLE_PROPERTY = "transactional"; + private HashMap opToPartPruner; private HashMap opToPartList; private HashMap> topOps; @@ -265,6 +365,9 @@ //flag for partial scan during analyze ... compute statistics protected boolean partialscan; + private volatile boolean runCBO = true; + private volatile boolean disableJoinMerge = false; + /* * Capture the CTE definitions in a Query. */ @@ -279,6 +382,11 @@ int nextNum; } + protected SemanticAnalyzer(HiveConf conf, boolean runCBO) throws SemanticException { + this(conf); + this.runCBO = runCBO; + } + public SemanticAnalyzer(HiveConf conf) throws SemanticException { super(conf); opToPartPruner = new HashMap(); @@ -315,8 +423,11 @@ public SemanticAnalyzer(HiveConf conf) throws SemanticException { } @Override - protected void reset() { - super.reset(); + protected void reset(boolean clearPartsCache) { + super.reset(true); + if(clearPartsCache) { + prunedPartitions.clear(); + } loadTableWork.clear(); loadFileWork.clear(); topOps.clear(); @@ -330,7 +441,7 @@ protected void reset() { smbMapJoinContext.clear(); opParseCtx.clear(); groupOpToInputTables.clear(); - prunedPartitions.clear(); + disableJoinMerge = false; aliasToCTEs.clear(); topToTable.clear(); opToPartPruner.clear(); @@ -344,8 +455,6 @@ protected void reset() { viewsExpanded = null; viewSelect = null; ctesExpanded = null; - noscan = false; - partialscan = false; globalLimitCtx.disableOpt(); viewAliasToInput.clear(); reduceSinkOperatorsAddedByEnforceBucketingSorting.clear(); @@ -354,7 +463,6 @@ protected void reset() { unparseTranslator.clear(); queryProperties.clear(); outputs.clear(); - globalLimitCtx.reset(); } public void initParseCtx(ParseContext pctx) { @@ -543,6 +651,10 @@ private void doPhase1GetAllAggregations(ASTNode expressionTree, public static String generateErrorMessage(ASTNode ast, String message) { StringBuilder sb = new StringBuilder(); + if (ast == null) { + sb.append("The abstract syntax tree is null"); + return sb.toString(); + } sb.append(ast.getLine()); sb.append(":"); sb.append(ast.getCharPositionInLine()); @@ -957,9 +1069,7 @@ private void addCTEAsSubQuery(QB qb, String cteName, String cteAlias) throws Sem private boolean isJoinToken(ASTNode node) { if ((node.getToken().getType() == HiveParser.TOK_JOIN) || 
(node.getToken().getType() == HiveParser.TOK_CROSSJOIN) - || (node.getToken().getType() == HiveParser.TOK_LEFTOUTERJOIN) - || (node.getToken().getType() == HiveParser.TOK_RIGHTOUTERJOIN) - || (node.getToken().getType() == HiveParser.TOK_FULLOUTERJOIN) + || isOuterJoinToken(node) || (node.getToken().getType() == HiveParser.TOK_LEFTSEMIJOIN) || (node.getToken().getType() == HiveParser.TOK_UNIQUEJOIN)) { return true; @@ -968,6 +1078,12 @@ private boolean isJoinToken(ASTNode node) { return false; } + private boolean isOuterJoinToken(ASTNode node) { + return (node.getToken().getType() == HiveParser.TOK_LEFTOUTERJOIN) + || (node.getToken().getType() == HiveParser.TOK_RIGHTOUTERJOIN) + || (node.getToken().getType() == HiveParser.TOK_FULLOUTERJOIN); + } + /** * Given the AST with TOK_JOIN as the root, get all the aliases for the tables * or subqueries in the join. @@ -985,6 +1101,7 @@ private void processJoin(QB qb, ASTNode join) throws SemanticException { "Join with multiple children")); } + queryProperties.incrementJoinCount(isOuterJoinToken(join)); for (int num = 0; num < numChildren; num++) { ASTNode child = (ASTNode) join.getChild(num); if (child.getToken().getType() == HiveParser.TOK_TABREF) { @@ -1091,10 +1208,15 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) qb.countSel(); qbp.setSelExprForClause(ctx_1.dest, ast); + int posn = 0; if (((ASTNode) ast.getChild(0)).getToken().getType() == HiveParser.TOK_HINTLIST) { qbp.setHints((ASTNode) ast.getChild(0)); + posn++; } + if ((ast.getChild(posn).getChild(0).getType() == HiveParser.TOK_TRANSFORM)) + queryProperties.setUsesScript(true); + LinkedHashMap aggregations = doPhase1GetAggregationsFromSelect(ast, qb, ctx_1.dest); doPhase1GetColumnAliasesFromSelect(ast, qbp); @@ -1105,6 +1227,8 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) case HiveParser.TOK_WHERE: qbp.setWhrExprForClause(ctx_1.dest, ast); + if (!SubQueryUtils.findSubQueries((ASTNode) ast.getChild(0)).isEmpty()) + queryProperties.setFilterWithSubQuery(true); break; case HiveParser.TOK_INSERT_INTO: @@ -1127,6 +1251,9 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) } } qbp.setDestForClause(ctx_1.dest, (ASTNode) ast.getChild(0)); + + if (qbp.getClauseNamesForDest().size() > 1) + queryProperties.setMultiDestQuery(true); break; case HiveParser.TOK_FROM: @@ -1150,9 +1277,9 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) processSubQuery(qb, frm); } else if (frm.getToken().getType() == HiveParser.TOK_LATERAL_VIEW || frm.getToken().getType() == HiveParser.TOK_LATERAL_VIEW_OUTER) { + queryProperties.setHasLateralViews(true); processLateralView(qb, frm); } else if (isJoinToken(frm)) { - queryProperties.setHasJoin(true); processJoin(qb, frm); qbp.setJoinExpr(frm); }else if(frm.getToken().getType() == HiveParser.TOK_PTBLFUNCTION){ @@ -1365,6 +1492,10 @@ private void getMetaData(QBExpr qbexpr, ReadEntity parentInput) } } + public Table getTable(TableScanOperator ts) { + return topToTable.get(ts); + } + public void getMetaData(QB qb) throws SemanticException { getMetaData(qb, null); } @@ -1423,11 +1554,20 @@ public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException } // Disallow INSERT INTO on bucketized tables + boolean isAcid = isAcidTable(tab); if (qb.getParseInfo().isInsertIntoTable(tab.getDbName(), tab.getTableName()) && - tab.getNumBuckets() > 0 && !isAcidTable(tab)) { + tab.getNumBuckets() > 0 && !isAcid) { throw new SemanticException(ErrorMsg.INSERT_INTO_BUCKETIZED_TABLE. 
getMsg("Table: " + tab_name)); } + // Disallow update and delete on non-acid tables + if ((updating() || deleting()) && !isAcid) { + // isAcidTable above also checks for whether we are using an acid compliant + // transaction manager. But that has already been caught in + // UpdateDeleteSemanticAnalyzer, so if we are updating or deleting and getting nonAcid + // here, it means the table itself doesn't support it. + throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, tab_name); + } // We check offline of the table, as if people only select from an // non-existing partition of an offline table, the partition won't @@ -1506,6 +1646,10 @@ public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException qb.getParseInfo().addTableSpec(alias, ts); } + + ReadEntity parentViewInfo = PlanUtils.getParentViewInfo(getAliasId(alias, qb), viewAliasToInput); + PlanUtils.addInput(inputs, + new ReadEntity(tab, parentViewInfo, parentViewInfo == null)); } LOG.info("Get metadata for subqueries"); @@ -5757,6 +5901,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) Integer dest_type = qbm.getDestTypeForAlias(dest); Table dest_tab = null; // destination table if any + boolean destTableIsAcid = false; // should the destination table be written to using ACID Partition dest_part = null;// destination partition if any Path queryTmpdir = null; // the intermediate destination directory Path dest_path = null; // the final destination directory @@ -5773,6 +5918,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) case QBMetaData.DEST_TABLE: { dest_tab = qbm.getDestTableForAlias(dest); + destTableIsAcid = isAcidTable(dest_tab); // Is the user trying to insert into a external tables if ((!conf.getBoolVar(HiveConf.ConfVars.HIVE_INSERT_INTO_EXTERNAL_TABLES)) && @@ -5868,9 +6014,10 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) // Create the work for moving the table // NOTE: specify Dynamic partitions in dest_tab for WriteEntity if (!isNonNativeTable) { - AcidUtils.Operation acidOp = getAcidType(table_desc.getOutputFileFormatClass()); - if (acidOp != AcidUtils.Operation.NOT_ACID) { - checkAcidConstraints(qb, table_desc); + AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; + if (destTableIsAcid) { + acidOp = getAcidType(table_desc.getOutputFileFormatClass()); + checkAcidConstraints(qb, table_desc, dest_tab); } ltd = new LoadTableDesc(queryTmpdir,table_desc, dpCtx, acidOp); ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), @@ -5928,6 +6075,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_part = qbm.getDestPartitionForAlias(dest); dest_tab = dest_part.getTable(); + destTableIsAcid = isAcidTable(dest_tab); if ((!conf.getBoolVar(HiveConf.ConfVars.HIVE_INSERT_INTO_EXTERNAL_TABLES)) && dest_tab.getTableType().equals(TableType.EXTERNAL_TABLE)) { throw new SemanticException( @@ -5975,9 +6123,10 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) lbCtx = constructListBucketingCtx(dest_part.getSkewedColNames(), dest_part.getSkewedColValues(), dest_part.getSkewedColValueLocationMaps(), dest_part.isStoredAsSubDirectories(), conf); - AcidUtils.Operation acidOp = getAcidType(table_desc.getOutputFileFormatClass()); - if (acidOp != AcidUtils.Operation.NOT_ACID) { - checkAcidConstraints(qb, table_desc); + AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; + if (destTableIsAcid) { + acidOp = getAcidType(table_desc.getOutputFileFormatClass()); + 
checkAcidConstraints(qb, table_desc, dest_tab); } ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp); ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), @@ -6132,9 +6281,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) ArrayList vecCol = new ArrayList(); if (updating() || deleting()) { - vecCol.add(new ColumnInfo(VirtualColumn.ROWID.getName(), - //TypeInfoUtils.getTypeInfoFromObjectInspector(VirtualColumn.ROWID.getObjectInspector()), - VirtualColumn.ROWID.getTypeInfo(), + vecCol.add(new ColumnInfo(VirtualColumn.ROWID.getName(), VirtualColumn.ROWID.getTypeInfo(), "", true)); } else { try { @@ -6163,8 +6310,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCESORTING)))); // If this table is working with ACID semantics, turn off merging - boolean acidTable = isAcidTable(dest_tab); - canBeMerged &= !acidTable; + canBeMerged &= !destTableIsAcid; FileSinkDesc fileSinkDesc = new FileSinkDesc( queryTmpdir, @@ -6180,7 +6326,7 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) // If this is an insert, update, or delete on an ACID table then mark that so the // FileSinkOperator knows how to properly write to it. - if (acidTable) { + if (destTableIsAcid) { AcidUtils.Operation wt = updating() ? AcidUtils.Operation.UPDATE : (deleting() ? AcidUtils.Operation.DELETE : AcidUtils.Operation.INSERT); fileSinkDesc.setWriteType(wt); @@ -6241,9 +6387,12 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input) // * no insert overwrites // * no use of vectorization // * turns off reduce deduplication optimization, as that sometimes breaks acid + // * Check that the table is bucketed + // * Check that the table is not sorted // This method assumes you have already decided that this is an Acid write. Don't call it if // that isn't true. - private void checkAcidConstraints(QB qb, TableDesc tableDesc) throws SemanticException { + private void checkAcidConstraints(QB qb, TableDesc tableDesc, + Table table) throws SemanticException { String tableName = tableDesc.getTableName(); if (!qb.getParseInfo().isInsertIntoTable(tableName)) { LOG.debug("Couldn't find table " + tableName + " in insertIntoTable"); @@ -6256,6 +6405,17 @@ private void checkAcidConstraints(QB qb, TableDesc tableDesc) throws SemanticExc LOG.info("Modifying config values for ACID write"); conf.setBoolVar(ConfVars.HIVEOPTREDUCEDEDUPLICATION, false); conf.setBoolVar(ConfVars.HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES, true); + conf.set(AcidUtils.CONF_ACID_KEY, "true"); + + if (table.getNumBuckets() < 1) { + throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, table.getTableName()); + } + if (table.getSortCols() != null && table.getSortCols().size() > 0) { + throw new SemanticException(ErrorMsg.ACID_NO_SORTED_BUCKETS, table.getTableName()); + } + + + } /** @@ -6318,6 +6478,7 @@ Operator genConversionSelectOperator(String dest, QB qb, Operator input, int columnNumber = tableFields.size(); ArrayList expressions = new ArrayList( columnNumber); + // MetadataTypedColumnsetSerDe does not need type conversions because it // does the conversion to String by itself. 
boolean isMetaDataSerDe = table_desc.getDeserializerClass().equals( @@ -6385,17 +6546,19 @@ Operator genConversionSelectOperator(String dest, QB qb, Operator input, if (converted) { // add the select operator RowResolver rowResolver = new RowResolver(); - ArrayList colName = new ArrayList(); + ArrayList colNames = new ArrayList(); + Map colExprMap = new HashMap(); for (int i = 0; i < expressions.size(); i++) { String name = getColumnInternalName(i); rowResolver.put("", name, new ColumnInfo(name, expressions.get(i) .getTypeInfo(), "", false)); - colName.add(name); + colNames.add(name); + colExprMap.put(name, expressions.get(i)); } Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( - new SelectDesc(expressions, colName), new RowSchema(rowResolver + new SelectDesc(expressions, colNames), new RowSchema(rowResolver .getColumnInfos()), input), rowResolver); - + output.setColumnExprMap(colExprMap); return output; } else { // not converted @@ -7892,7 +8055,7 @@ private void mergeJoins(QB qb, QBJoinTree node, QBJoinTree target, int pos, int[ List nodeConds = node.getExpressions().get(i + 1); ArrayList reordereNodeConds = new ArrayList(); for(int k=0; k < tgtToNodeExprMap.length; k++) { - reordereNodeConds.add(nodeConds.get(k)); + reordereNodeConds.add(nodeConds.get(tgtToNodeExprMap[k])); } expr.add(reordereNodeConds); } @@ -9444,7 +9607,9 @@ public Operator genPlan(QB qb) throws SemanticException { aliasToOpInfo ); } } - mergeJoinTree(qb); + + if (!disableJoinMerge) + mergeJoinTree(qb); } // if any filters are present in the join tree, push them on top of the @@ -9652,9 +9817,9 @@ public Phase1Ctx initPhase1Ctx() { } @Override - public void init() { + public void init(boolean clearPartsCache) { // clear most members - reset(); + reset(clearPartsCache); // init QB qb = new QB(null, null, false); @@ -9709,11 +9874,82 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { getMetaData(qb); LOG.info("Completed getting MetaData in Semantic Analysis"); + + if (runCBO) { + boolean tokenTypeIsQuery = ast.getToken().getType() == HiveParser.TOK_QUERY + || ast.getToken().getType() == HiveParser.TOK_EXPLAIN; + if (!tokenTypeIsQuery || createVwDesc != null + || !HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_ENABLED) + || !canHandleQuery(qb, true) || !HiveOptiqUtil.validateASTForCBO(ast)) { + runCBO = false; + } + + if (runCBO) { + disableJoinMerge = true; + } + } + // Save the result schema derived from the sink operator produced // by genPlan. This has the correct column names, which clients // such as JDBC would prefer instead of the c0, c1 we'll end // up with later. - Operator sinkOp = genPlan(qb); + Operator sinkOp = null; + + if (runCBO) { + OptiqBasedPlanner optiqPlanner = new OptiqBasedPlanner(); + boolean reAnalyzeAST = false; + + try { + // 1. Gen Optimized AST + ASTNode newAST = optiqPlanner.getOptimizedAST(prunedPartitions); + + // 2. Regen OP plan from optimized AST + init(false); + ctx_1 = initPhase1Ctx(); + if (!doPhase1(newAST, qb, ctx_1)) { + throw new RuntimeException( + "Couldn't do phase1 on CBO optimized query plan"); + } + // unfortunately making prunedPartitions immutable is not possible here + // with SemiJoins not all tables are costed in CBO, + // so their PartitionList is not evaluated until the run phase. 
+ //prunedPartitions = ImmutableMap.copyOf(prunedPartitions); + getMetaData(qb); + + disableJoinMerge = true; + sinkOp = genPlan(qb); + LOG.info("CBO Succeeded; optimized logical plan."); + LOG.debug(newAST.dump()); + + /* + * Use non CBO Result Set Schema so as to preserve user specified names. + * Hive seems to have bugs with OB/LIMIT in sub queries. // 3. Reset + * result set schema resultSchema = + * convertRowSchemaToResultSetSchema(opParseCtx.get(sinkOp) + * .getRowResolver(), true); + */ + } catch (Exception e) { + LOG.error("CBO failed, skipping CBO. ", e); + if (!conf.getBoolVar(ConfVars.HIVE_IN_TEST) || + (optiqPlanner.noColsMissingStats.get() > 0) || + e instanceof OptiqSemanticException) { + reAnalyzeAST = true; + } else { + throw e instanceof SemanticException ? (SemanticException) e : new SemanticException(e); + } + } finally { + runCBO = false; + disableJoinMerge = false; + if (reAnalyzeAST) { + init(true); + prunedPartitions.clear(); + analyzeInternal(ast); + return; + } + } + } else { + sinkOp = genPlan(qb); + } if (createVwDesc != null) resultSchema = convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver()); @@ -10845,9 +11081,13 @@ private void validateAnalyzeNoscan(ASTNode tree) throws SemanticException { Table tbl; try { tbl = db.getTable(tableName); - } catch (HiveException e) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName)); + } catch (InvalidTableException e) { + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e); + } + catch (HiveException e) { + throw new SemanticException(e.getMessage(), e); } + /* noscan uses hdfs apis to retrieve such information from Namenode. */ /* But that will be specific to hdfs. Through storagehandler mechanism, */ /* storage of table could be on any storage system: hbase, cassandra etc. */ @@ -10870,8 +11110,10 @@ private void validateAnalyzePartialscan(ASTNode tree) throws SemanticException { Table tbl; try { tbl = db.getTable(tableName); + } catch (InvalidTableException e) { + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName), e); } catch (HiveException e) { - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(tableName)); + throw new SemanticException(e.getMessage(), e); } /* partialscan uses hdfs apis to retrieve such information from Namenode. */ /* But that will be specific to hdfs. Through storagehandler mechanism, */ @@ -11912,9 +12154,9 @@ else return (ltd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE : // Even if the table is of Acid type, if we aren't working with an Acid compliant TxnManager // then return false. private boolean isAcidTable(Table tab) { - if (tab == null || tab.getOutputFormatClass() == null) return false; + if (tab == null) return false; if (!SessionState.get().getTxnMgr().supportsAcid()) return false; - return isAcidOutputFormat(tab.getOutputFormatClass()); + return tab.getProperty(ACID_TABLE_PROPERTY) != null; } private boolean isAcidOutputFormat(Class of) { @@ -11953,4 +12195,2032 @@ protected boolean deleting() { return false; } + /**** Temporary Place Holder For Optiq plan Gen, Optimizer ****/ + + /* + * Entry point to Optimizations using Optiq. + */ + private boolean canHandleQuery(QB qbToChk, boolean topLevelQB) { + boolean runOptiqPlanner = false; + // Assumption: + // 1. If top level QB is query then everything below it must also be Query + // 2. 
Nested Subquery will return false for qbToChk.getIsQuery() + if ((!topLevelQB || qbToChk.getIsQuery()) + && (!conf.getBoolVar(ConfVars.HIVE_IN_TEST) || conf.getVar(ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("nonstrict")) + && (!topLevelQB || (queryProperties.getJoinCount() > 1) || conf.getBoolVar(ConfVars.HIVE_IN_TEST)) + && !queryProperties.hasClusterBy() && !queryProperties.hasDistributeBy() + && !queryProperties.hasSortBy() && !queryProperties.hasPTF() + && !queryProperties.usesScript() && !queryProperties.hasMultiDestQuery() + && !queryProperties.hasLateralViews()) { + runOptiqPlanner = true; + } else { + LOG.info("Can not invoke CBO; query contains operators not supported for CBO."); + } + + return runOptiqPlanner; + } + + private class OptiqBasedPlanner implements Frameworks.PlannerAction { + private RelOptCluster cluster; + private RelOptSchema relOptSchema; + private SemanticException semanticException; + private Map partitionCache; + private AtomicInteger noColsMissingStats = new AtomicInteger(0); + List topLevelFieldSchema; + + // TODO: Do we need to keep track of RR, ColNameToPosMap for every op or + // just last one. + LinkedHashMap relToHiveRR = new LinkedHashMap(); + LinkedHashMap> relToHiveColNameOptiqPosMap = new LinkedHashMap>(); + + private ASTNode getOptimizedAST(Map partitionCache) + throws SemanticException { + ASTNode optiqOptimizedAST = null; + RelNode optimizedOptiqPlan = null; + this.partitionCache = partitionCache; + + try { + optimizedOptiqPlan = Frameworks.withPlanner(this, + Frameworks.newConfigBuilder().typeSystem(new HiveTypeSystemImpl()).build()); + } catch (Exception e) { + if (semanticException != null) + throw semanticException; + else + throw new RuntimeException(e); + } + optiqOptimizedAST = ASTConverter.convert(optimizedOptiqPlan, topLevelFieldSchema); + + return optiqOptimizedAST; + } + + @Override + public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlus rootSchema) { + RelNode optiqGenPlan = null; + RelNode optiqPreCboPlan = null; + RelNode optiqOptimizedPlan = null; + + /* + * recreate cluster, so that it picks up the additional traitDef + */ + RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(); + final RelOptQuery query = new RelOptQuery(planner); + final RexBuilder rexBuilder = cluster.getRexBuilder(); + cluster = query.createCluster(rexBuilder.getTypeFactory(), rexBuilder); + + this.cluster = cluster; + this.relOptSchema = relOptSchema; + + try { + optiqGenPlan = genLogicalPlan(qb, true); + topLevelFieldSchema = convertRowSchemaToResultSetSchema(relToHiveRR.get(optiqGenPlan), + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES)); + } catch (SemanticException e) { + semanticException = e; + throw new RuntimeException(e); + } + + optiqPreCboPlan = applyPreCBOTransforms(optiqGenPlan, HiveDefaultRelMetadataProvider.INSTANCE); + List list = Lists.newArrayList(); + list.add(HiveDefaultRelMetadataProvider.INSTANCE); + RelTraitSet desiredTraits = cluster.traitSetOf(HiveRel.CONVENTION, RelCollationImpl.EMPTY); + + HepProgram hepPgm = null; + HepProgramBuilder hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP) + .addRuleInstance(new ConvertMultiJoinRule(HiveJoinRel.class)); + hepPgmBldr.addRuleInstance(new LoptOptimizeJoinRule(HiveJoinRel.HIVE_JOIN_FACTORY, + HiveProjectRel.DEFAULT_PROJECT_FACTORY, HiveFilterRel.DEFAULT_FILTER_FACTORY)); + + hepPgm = hepPgmBldr.build(); + HepPlanner hepPlanner = new HepPlanner(hepPgm); + + hepPlanner.registerMetadataProviders(list); 
+ RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
+ cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
+
+ RelNode rootRel = optiqPreCboPlan;
+ hepPlanner.setRoot(rootRel);
+ if (!optiqPreCboPlan.getTraitSet().equals(desiredTraits)) {
+ rootRel = hepPlanner.changeTraits(optiqPreCboPlan, desiredTraits);
+ }
+ hepPlanner.setRoot(rootRel);
+
+ optiqOptimizedPlan = hepPlanner.findBestExp();
+
+ if (LOG.isDebugEnabled() && !conf.getBoolVar(ConfVars.HIVE_IN_TEST)) {
+ LOG.debug("CBO Planning details:\n");
+ LOG.debug("Original Plan:\n");
+ LOG.debug(RelOptUtil.toString(optiqGenPlan));
+ LOG.debug("Plan After PPD, PartPruning, ColumnPruning:\n");
+ LOG.debug(RelOptUtil.toString(optiqPreCboPlan));
+ LOG.debug("Plan After Join Reordering:\n");
+ LOG.debug(RelOptUtil.toString(optiqOptimizedPlan, SqlExplainLevel.ALL_ATTRIBUTES));
+ }
+
+ return optiqOptimizedPlan;
+ }
+
+ public RelNode applyPreCBOTransforms(RelNode basePlan, RelMetadataProvider mdProvider) {
+
+ // TODO: Decorrelation of subquery should be done before attempting
+ // Partition Pruning; otherwise Expression evaluation may try to execute
+ // a correlated subquery.
+ basePlan = hepPlan(basePlan, true, mdProvider, new PushFilterPastProjectRule(
+ FilterRelBase.class, HiveFilterRel.DEFAULT_FILTER_FACTORY, HiveProjectRel.class,
+ HiveProjectRel.DEFAULT_PROJECT_FACTORY), new PushFilterPastSetOpRule(
+ HiveFilterRel.DEFAULT_FILTER_FACTORY), new MergeFilterRule(
+ HiveFilterRel.DEFAULT_FILTER_FACTORY), HivePushFilterPastJoinRule.JOIN,
+ HivePushFilterPastJoinRule.FILTER_ON_JOIN,
+ new FilterAggregateTransposeRule(
+ FilterRelBase.class,
+ HiveFilterRel.DEFAULT_FILTER_FACTORY,
+ AggregateRelBase.class));
+
+ basePlan = hepPlan(basePlan, false, mdProvider, new TransitivePredicatesOnJoinRule(
+ JoinRelBase.class, HiveFilterRel.DEFAULT_FILTER_FACTORY),
+ // TODO: Enable it after OPTIQ-407 is fixed
+ //RemoveTrivialProjectRule.INSTANCE,
+ new HivePartitionPrunerRule(SemanticAnalyzer.this.conf));
+
+ RelFieldTrimmer fieldTrimmer = new RelFieldTrimmer(null, HiveProjectRel.DEFAULT_PROJECT_FACTORY,
+ HiveFilterRel.DEFAULT_FILTER_FACTORY, HiveJoinRel.HIVE_JOIN_FACTORY, RelFactories.DEFAULT_SEMI_JOIN_FACTORY,
+ HiveSortRel.HIVE_SORT_REL_FACTORY, HiveAggregateRel.HIVE_AGGR_REL_FACTORY, HiveUnionRel.UNION_REL_FACTORY);
+ basePlan = fieldTrimmer.trim(basePlan);
+
+ basePlan = hepPlan(basePlan, true, mdProvider,
+ new PushFilterPastProjectRule(FilterRelBase.class,
+ HiveFilterRel.DEFAULT_FILTER_FACTORY, HiveProjectRel.class,
+ HiveProjectRel.DEFAULT_PROJECT_FACTORY));
+
+ return basePlan;
+ }
+
+ private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges,
+ RelMetadataProvider mdProvider, RelOptRule... rules) {
+
+ RelNode optimizedRelNode = basePlan;
+ HepProgramBuilder programBuilder = new HepProgramBuilder();
+ if (followPlanChanges) {
+ programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN);
+ programBuilder = programBuilder.addRuleCollection(ImmutableList.copyOf(rules));
+ } else {
+ // TODO: Should this also be TOP_DOWN?
+ for (RelOptRule r : rules)
+ programBuilder.addRuleInstance(r);
+ }
+
+ HepPlanner planner = new HepPlanner(programBuilder.build());
+ List list = Lists.newArrayList();
+ list.add(mdProvider);
+ planner.registerMetadataProviders(list);
+ RelMetadataProvider chainedProvider = ChainedRelMetadataProvider.of(list);
+ basePlan.getCluster().setMetadataProvider(
+ new CachingRelMetadataProvider(chainedProvider, planner));
+
+ planner.setRoot(basePlan);
+ optimizedRelNode = planner.findBestExp();
+
+ return optimizedRelNode;
+ }
+
+ @SuppressWarnings("nls")
+ private RelNode genUnionLogicalPlan(String unionalias, String leftalias, RelNode leftRel,
+ String rightalias, RelNode rightRel) throws SemanticException {
+ HiveUnionRel unionRel = null;
+
+ // 1. Get Row Resolvers, Column map for original left and right input of
+ // Union Rel
+ RowResolver leftRR = this.relToHiveRR.get(leftRel);
+ RowResolver rightRR = this.relToHiveRR.get(rightRel);
+ HashMap leftmap = leftRR.getFieldMap(leftalias);
+ HashMap rightmap = rightRR.getFieldMap(rightalias);
+
+ // 2. Validate that Union is feasible according to Hive (by using type
+ // info from RR)
+ if (leftmap.size() != rightmap.size()) {
+ throw new SemanticException("Schema of both sides of union should match.");
+ }
+
+ ASTNode tabref = qb.getAliases().isEmpty() ? null : qb.getParseInfo().getSrcForAlias(
+ qb.getAliases().get(0));
+ for (Map.Entry lEntry : leftmap.entrySet()) {
+ String field = lEntry.getKey();
+ ColumnInfo lInfo = lEntry.getValue();
+ ColumnInfo rInfo = rightmap.get(field);
+ if (rInfo == null) {
+ throw new SemanticException(generateErrorMessage(tabref,
+ "Schema of both sides of union should match. " + rightalias
+ + " does not have the field " + field));
+ }
+ if (lInfo == null) {
+ throw new SemanticException(generateErrorMessage(tabref,
+ "Schema of both sides of union should match. " + leftalias
+ + " does not have the field " + field));
+ }
+ if (!lInfo.getInternalName().equals(rInfo.getInternalName())) {
+ throw new SemanticException(generateErrorMessage(tabref,
+ "Schema of both sides of union should match: field " + field + ":"
+ + " appears on the left side of the UNION at column position: "
+ + getPositionFromInternalName(lInfo.getInternalName())
+ + ", and on the right side of the UNION at column position: "
+ + getPositionFromInternalName(rInfo.getInternalName())
+ + ". Column positions should match for a UNION"));
+ }
+ // try widening conversion, otherwise fail union
+ TypeInfo commonTypeInfo = FunctionRegistry.getCommonClassForUnionAll(lInfo.getType(),
+ rInfo.getType());
+ if (commonTypeInfo == null) {
+ throw new SemanticException(generateErrorMessage(tabref,
+ "Schema of both sides of union should match: Column " + field + " is of type "
+ + lInfo.getType().getTypeName() + " on first table and type "
+ + rInfo.getType().getTypeName() + " on second table"));
+ }
+ }
+
+ // 3. Construct Union Output RR using original left & right Input
+ RowResolver unionoutRR = new RowResolver();
+ for (Map.Entry lEntry : leftmap.entrySet()) {
+ String field = lEntry.getKey();
+ ColumnInfo lInfo = lEntry.getValue();
+ ColumnInfo rInfo = rightmap.get(field);
+ ColumnInfo unionColInfo = new ColumnInfo(lInfo);
+ unionColInfo.setTabAlias(unionalias);
+ unionColInfo.setType(FunctionRegistry.getCommonClassForUnionAll(lInfo.getType(),
+ rInfo.getType()));
+ unionoutRR.put(unionalias, field, unionColInfo);
+ }
+
+ // 4.
Determine which columns requires cast on left/right input (Optiq + // requires exact types on both sides of union) + boolean leftNeedsTypeCast = false; + boolean rightNeedsTypeCast = false; + List leftProjs = new ArrayList(); + List rightProjs = new ArrayList(); + List leftRowDT = leftRel.getRowType().getFieldList(); + List rightRowDT = rightRel.getRowType().getFieldList(); + + RelDataType leftFieldDT; + RelDataType rightFieldDT; + RelDataType unionFieldDT; + for (int i = 0; i < leftRowDT.size(); i++) { + leftFieldDT = leftRowDT.get(i).getType(); + rightFieldDT = rightRowDT.get(i).getType(); + if (!leftFieldDT.equals(rightFieldDT)) { + unionFieldDT = TypeConverter.convert(unionoutRR.getColumnInfos().get(i).getType(), + cluster.getTypeFactory()); + if (!unionFieldDT.equals(leftFieldDT)) { + leftNeedsTypeCast = true; + } + leftProjs.add(cluster.getRexBuilder().ensureType(unionFieldDT, + cluster.getRexBuilder().makeInputRef(leftFieldDT, i), true)); + + if (!unionFieldDT.equals(rightFieldDT)) { + rightNeedsTypeCast = true; + } + rightProjs.add(cluster.getRexBuilder().ensureType(unionFieldDT, + cluster.getRexBuilder().makeInputRef(rightFieldDT, i), true)); + } else { + leftProjs.add(cluster.getRexBuilder().ensureType(leftFieldDT, + cluster.getRexBuilder().makeInputRef(leftFieldDT, i), true)); + rightProjs.add(cluster.getRexBuilder().ensureType(rightFieldDT, + cluster.getRexBuilder().makeInputRef(rightFieldDT, i), true)); + } + } + + // 5. Introduce Project Rel above original left/right inputs if cast is + // needed for type parity + RelNode unionLeftInput = leftRel; + RelNode unionRightInput = rightRel; + if (leftNeedsTypeCast) { + unionLeftInput = HiveProjectRel.create(leftRel, leftProjs, leftRel.getRowType() + .getFieldNames()); + } + if (rightNeedsTypeCast) { + unionRightInput = HiveProjectRel.create(rightRel, rightProjs, rightRel.getRowType() + .getFieldNames()); + } + + // 6. Construct Union Rel + ImmutableList.Builder bldr = new ImmutableList.Builder(); + bldr.add(unionLeftInput); + bldr.add(unionRightInput); + unionRel = new HiveUnionRel(cluster, TraitsUtil.getDefaultTraitSet(cluster), + bldr.build()); + + relToHiveRR.put(unionRel, unionoutRR); + relToHiveColNameOptiqPosMap.put(unionRel, + this.buildHiveToOptiqColumnMap(unionoutRR, unionRel)); + + return unionRel; + } + + private RelNode genJoinRelNode(RelNode leftRel, RelNode rightRel, JoinType hiveJoinType, + ASTNode joinCond) throws SemanticException { + RelNode joinRel = null; + + // 1. construct the RowResolver for the new Join Node by combining row + // resolvers from left, right + RowResolver leftRR = this.relToHiveRR.get(leftRel); + RowResolver rightRR = this.relToHiveRR.get(rightRel); + RowResolver joinRR = null; + + if (hiveJoinType != JoinType.LEFTSEMI) { + joinRR = RowResolver.getCombinedRR(leftRR, rightRR); + } else { + joinRR = new RowResolver(); + RowResolver.add(joinRR, leftRR, 0); + } + + // 2. 
Construct ExprNodeDesc representing Join Condition
+ RexNode optiqJoinCond = null;
+ if (joinCond != null) {
+ JoinTypeCheckCtx jCtx = new JoinTypeCheckCtx(leftRR, rightRR, hiveJoinType);
+ Map exprNodes = JoinCondTypeCheckProcFactory.genExprNode(joinCond,
+ jCtx);
+ if (jCtx.getError() != null)
+ throw new SemanticException(SemanticAnalyzer.generateErrorMessage(jCtx.getErrorSrcNode(),
+ jCtx.getError()));
+
+ ExprNodeDesc joinCondnExprNode = exprNodes.get(joinCond);
+
+ List inputRels = new ArrayList();
+ inputRels.add(leftRel);
+ inputRels.add(rightRel);
+ optiqJoinCond = RexNodeConverter.convert(cluster, joinCondnExprNode, inputRels,
+ relToHiveRR, relToHiveColNameOptiqPosMap, false);
+ } else {
+ optiqJoinCond = cluster.getRexBuilder().makeLiteral(true);
+ }
+
+ // 3. Validate that join condition is legal (i.e. no function referring to
+ // both sides of join, only equi join)
+ // TODO: Join filter handling (only supported for OJ by runtime or is it
+ // supported for IJ as well)
+
+ // 4. Construct Join Rel Node
+ boolean leftSemiJoin = false;
+ JoinRelType optiqJoinType;
+ switch (hiveJoinType) {
+ case LEFTOUTER:
+ optiqJoinType = JoinRelType.LEFT;
+ break;
+ case RIGHTOUTER:
+ optiqJoinType = JoinRelType.RIGHT;
+ break;
+ case FULLOUTER:
+ optiqJoinType = JoinRelType.FULL;
+ break;
+ case LEFTSEMI:
+ optiqJoinType = JoinRelType.INNER;
+ leftSemiJoin = true;
+ break;
+ case INNER:
+ default:
+ optiqJoinType = JoinRelType.INNER;
+ break;
+ }
+
+ if (leftSemiJoin) {
+ List sysFieldList = new ArrayList();
+ List leftJoinKeys = new ArrayList();
+ List rightJoinKeys = new ArrayList();
+
+ RexNode nonEquiConds = RelOptUtil.splitJoinCondition(sysFieldList, leftRel, rightRel,
+ optiqJoinCond, leftJoinKeys, rightJoinKeys, null, null);
+
+ if (!nonEquiConds.isAlwaysTrue()) {
+ throw new SemanticException("Non equality condition not supported in Semi-Join"
+ + nonEquiConds);
+ }
+
+ RelNode[] inputRels = new RelNode[] { leftRel, rightRel };
+ final List leftKeys = new ArrayList();
+ final List rightKeys = new ArrayList();
+ optiqJoinCond = HiveOptiqUtil.projectNonColumnEquiConditions(
+ HiveProjectRel.DEFAULT_PROJECT_FACTORY, inputRels, leftJoinKeys, rightJoinKeys, 0,
+ leftKeys, rightKeys);
+
+ joinRel = new SemiJoinRel(cluster, cluster.traitSetOf(HiveRel.CONVENTION),
+ inputRels[0], inputRels[1], optiqJoinCond, ImmutableIntList.copyOf(leftKeys),
+ ImmutableIntList.copyOf(rightKeys));
+ } else {
+ joinRel = HiveJoinRel.getJoin(cluster, leftRel, rightRel, optiqJoinCond, optiqJoinType,
+ leftSemiJoin);
+ }
+ // 5. Add new JoinRel & its RR to the maps
+ relToHiveColNameOptiqPosMap.put(joinRel, this.buildHiveToOptiqColumnMap(joinRR, joinRel));
+ relToHiveRR.put(joinRel, joinRR);
+
+ return joinRel;
+ }
+
+ /**
+ * Generate Join Logical Plan RelNode by walking through the join AST.
+ *
+ * @param joinParseTree
+ * @param aliasToRel
+ * Alias(Table/Relation alias) to RelNode; only read and not
+ * written into by this method
+ * @return
+ * @throws SemanticException
+ */
+ private RelNode genJoinLogicalPlan(ASTNode joinParseTree, Map aliasToRel)
+ throws SemanticException {
+ RelNode leftRel = null;
+ RelNode rightRel = null;
+ JoinType hiveJoinType = null;
+
+ if (joinParseTree.getToken().getType() == HiveParser.TOK_UNIQUEJOIN) {
+ String msg = String.format("UNIQUE JOIN is currently not supported in CBO,"
+ + " turn off cbo to use UNIQUE JOIN.");
+ LOG.debug(msg);
+ throw new OptiqSemanticException(msg);
+ }
+
+ // 1.
Determine Join Type
+ // TODO: What about TOK_CROSSJOIN, TOK_MAPJOIN
+ switch (joinParseTree.getToken().getType()) {
+ case HiveParser.TOK_LEFTOUTERJOIN:
+ hiveJoinType = JoinType.LEFTOUTER;
+ break;
+ case HiveParser.TOK_RIGHTOUTERJOIN:
+ hiveJoinType = JoinType.RIGHTOUTER;
+ break;
+ case HiveParser.TOK_FULLOUTERJOIN:
+ hiveJoinType = JoinType.FULLOUTER;
+ break;
+ case HiveParser.TOK_LEFTSEMIJOIN:
+ hiveJoinType = JoinType.LEFTSEMI;
+ break;
+ default:
+ hiveJoinType = JoinType.INNER;
+ break;
+ }
+
+ // 2. Get Left Table Alias
+ ASTNode left = (ASTNode) joinParseTree.getChild(0);
+ if ((left.getToken().getType() == HiveParser.TOK_TABREF)
+ || (left.getToken().getType() == HiveParser.TOK_SUBQUERY)
+ || (left.getToken().getType() == HiveParser.TOK_PTBLFUNCTION)) {
+ String tableName = getUnescapedUnqualifiedTableName((ASTNode) left.getChild(0))
+ .toLowerCase();
+ String leftTableAlias = left.getChildCount() == 1 ? tableName : unescapeIdentifier(left
+ .getChild(left.getChildCount() - 1).getText().toLowerCase());
+ // ptf node form is: ^(TOK_PTBLFUNCTION $name $alias?
+ // partitionTableFunctionSource partitioningSpec? expression*)
+ // guaranteed to have an alias here: check done in processJoin
+ leftTableAlias = (left.getToken().getType() == HiveParser.TOK_PTBLFUNCTION) ? unescapeIdentifier(left
+ .getChild(1).getText().toLowerCase())
+ : leftTableAlias;
+ leftRel = aliasToRel.get(leftTableAlias);
+ } else if (isJoinToken(left)) {
+ leftRel = genJoinLogicalPlan(left, aliasToRel);
+ } else {
+ assert (false);
+ }
+
+ // 3. Get Right Table Alias
+ ASTNode right = (ASTNode) joinParseTree.getChild(1);
+ if ((right.getToken().getType() == HiveParser.TOK_TABREF)
+ || (right.getToken().getType() == HiveParser.TOK_SUBQUERY)
+ || (right.getToken().getType() == HiveParser.TOK_PTBLFUNCTION)) {
+ String tableName = getUnescapedUnqualifiedTableName((ASTNode) right.getChild(0))
+ .toLowerCase();
+ String rightTableAlias = right.getChildCount() == 1 ? tableName : unescapeIdentifier(right
+ .getChild(right.getChildCount() - 1).getText().toLowerCase());
+ // ptf node form is: ^(TOK_PTBLFUNCTION $name $alias?
+ // partitionTableFunctionSource partitioningSpec? expression*)
+ // guaranteed to have an alias here: check done in processJoin
+ rightTableAlias = (right.getToken().getType() == HiveParser.TOK_PTBLFUNCTION) ? unescapeIdentifier(right
+ .getChild(1).getText().toLowerCase())
+ : rightTableAlias;
+ rightRel = aliasToRel.get(rightTableAlias);
+ } else {
+ assert (false);
+ }
+
+ // 4. Get Join Condition
+ ASTNode joinCond = (ASTNode) joinParseTree.getChild(2);
+
+ // 5. Create Join rel
+ return genJoinRelNode(leftRel, rightRel, hiveJoinType, joinCond);
+ }
+
+ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticException {
+ RowResolver rr = new RowResolver();
+ HiveTableScanRel tableRel = null;
+
+ try {
+
+ // 1. If the table has a Sample specified, bail from Optiq path.
+ if ( qb.getParseInfo().getTabSample(tableAlias) != null ||
+ SemanticAnalyzer.this.nameToSplitSample.containsKey(tableAlias)) {
+ String msg = String.format("Table Sample specified for %s."
+ + " Currently we don't support Table Sample clauses in CBO,"
+ + " turn off cbo for queries on tableSamples.", tableAlias);
+ LOG.debug(msg);
+ throw new OptiqSemanticException(msg);
+ }
+
+ // 2. Get Table Metadata
+ Table tab = qb.getMetaData().getSrcForAlias(tableAlias);
+
+ // 3.
Get Table Logical Schema (Row Type)
+ // NOTE: Table logical schema = Non Partition Cols + Partition Cols +
+ // Virtual Cols
+
+ // 3.1 Add Column info for non-partition cols (Object Inspector fields)
+ StructObjectInspector rowObjectInspector = (StructObjectInspector) tab.getDeserializer()
+ .getObjectInspector();
+ List fields = rowObjectInspector.getAllStructFieldRefs();
+ ColumnInfo colInfo;
+ String colName;
+ ArrayList cInfoLst = new ArrayList();
+ for (int i = 0; i < fields.size(); i++) {
+ colName = fields.get(i).getFieldName();
+ colInfo = new ColumnInfo(
+ fields.get(i).getFieldName(),
+ TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()),
+ tableAlias, false);
+ colInfo.setSkewedCol((isSkewedCol(tableAlias, qb, colName)) ? true : false);
+ rr.put(tableAlias, colName, colInfo);
+ cInfoLst.add(colInfo);
+ }
+ // TODO: Fix this
+ ArrayList nonPartitionColumns = new ArrayList(cInfoLst);
+ ArrayList partitionColumns = new ArrayList();
+
+ // 3.2 Add column info corresponding to partition columns
+ for (FieldSchema part_col : tab.getPartCols()) {
+ colName = part_col.getName();
+ colInfo = new ColumnInfo(colName,
+ TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), tableAlias, true);
+ rr.put(tableAlias, colName, colInfo);
+ cInfoLst.add(colInfo);
+ partitionColumns.add(colInfo);
+ }
+
+ // 3.3 Add column info corresponding to virtual columns
+ Iterator vcs = VirtualColumn.getRegistry(conf).iterator();
+ while (vcs.hasNext()) {
+ VirtualColumn vc = vcs.next();
+ colInfo = new ColumnInfo(vc.getName(), vc.getTypeInfo(), tableAlias, true,
+ vc.getIsHidden());
+ rr.put(tableAlias, vc.getName(), colInfo);
+ cInfoLst.add(colInfo);
+ }
+
+ // 3.4 Build row type from fields
+ RelDataType rowType = TypeConverter.getType(cluster, rr, null);
+
+ // 4. Build RelOptAbstractTable
+ String fullyQualifiedTabName = tab.getDbName();
+ if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty())
+ fullyQualifiedTabName = fullyQualifiedTabName + "." + tab.getTableName();
+ else
+ fullyQualifiedTabName = tab.getTableName();
+ RelOptHiveTable optTable = new RelOptHiveTable(relOptSchema, fullyQualifiedTabName,
+ tableAlias, rowType, tab, nonPartitionColumns, partitionColumns, conf, partitionCache,
+ noColsMissingStats);
+
+ // 5. Build Hive Table Scan Rel
+ tableRel = new HiveTableScanRel(cluster, cluster.traitSetOf(HiveRel.CONVENTION), optTable,
+ rowType);
+
+ // 6.
Add Schema(RR) to RelNode-Schema map
+ ImmutableMap hiveToOptiqColMap = buildHiveToOptiqColumnMap(rr, tableRel);
+ relToHiveRR.put(tableRel, rr);
+ relToHiveColNameOptiqPosMap.put(tableRel, hiveToOptiqColMap);
+ } catch (Exception e) {
+ if (e instanceof SemanticException) {
+ throw (SemanticException) e;
+ } else {
+ throw (new RuntimeException(e));
+ }
+ }
+
+ return tableRel;
+ }
+
+ private RelNode genFilterRelNode(ASTNode filterExpr, RelNode srcRel) throws SemanticException {
+ ExprNodeDesc filterCondn = genExprNodeDesc(filterExpr, relToHiveRR.get(srcRel));
+ ImmutableMap hiveColNameOptiqPosMap = this.relToHiveColNameOptiqPosMap
+ .get(srcRel);
+ RexNode convertedFilterExpr = new RexNodeConverter(cluster, srcRel.getRowType(),
+ hiveColNameOptiqPosMap, 0, true).convert(filterCondn);
+ RelNode filterRel = new HiveFilterRel(cluster, cluster.traitSetOf(HiveRel.CONVENTION),
+ srcRel, convertedFilterExpr);
+ relToHiveRR.put(filterRel, relToHiveRR.get(srcRel));
+ relToHiveColNameOptiqPosMap.put(filterRel, hiveColNameOptiqPosMap);
+
+ return filterRel;
+ }
+
+ private RelNode genFilterRelNode(QB qb, ASTNode searchCond, RelNode srcRel,
+ Map aliasToRel, boolean forHavingClause) throws SemanticException {
+ /*
+ * Handle Subquery predicates.
+ *
+ * Notes (8/22/14 hb): Why is this a copy of the code from {@link
+ * #genFilterPlan} - for now we will support the same behavior as non CBO
+ * route. - but plan to allow nested SubQueries(Restriction.9.m) and
+ * multiple SubQuery expressions(Restriction.8.m). This requires us to
+ * utilize Optiq's Decorrelation mechanics, and for Optiq to fix/flush out
+ * Null semantics(OPTIQ-373) - besides only the driving code has been
+ * copied. Most of the code which is SubQueryUtils and QBSubQuery is
+ * reused.
+ */
+ int numSrcColumns = srcRel.getRowType().getFieldCount();
+ List subQueriesInOriginalTree = SubQueryUtils.findSubQueries(searchCond);
+ if (subQueriesInOriginalTree.size() > 0) {
+
+ /*
+ * Restriction.9.m :: disallow nested SubQuery expressions.
+ */
+ if (qb.getSubQueryPredicateDef() != null) {
+ throw new SemanticException(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg(
+ subQueriesInOriginalTree.get(0), "Nested SubQuery expressions are not supported."));
+ }
+
+ /*
+ * Restriction.8.m :: We allow only 1 SubQuery expression per Query.
+ */
+ if (subQueriesInOriginalTree.size() > 1) {
+
+ throw new SemanticException(ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg(
+ subQueriesInOriginalTree.get(1), "Only 1 SubQuery expression is supported."));
+ }
+
+ /*
+ * Clone the Search AST; apply all rewrites on the clone.
+ */ + ASTNode clonedSearchCond = (ASTNode) SubQueryUtils.adaptor.dupTree(searchCond); + List subQueries = SubQueryUtils.findSubQueries(clonedSearchCond); + + RowResolver inputRR = relToHiveRR.get(srcRel); + RowResolver outerQBRR = inputRR; + ImmutableMap outerQBPosMap = + relToHiveColNameOptiqPosMap.get(srcRel); + + for (int i = 0; i < subQueries.size(); i++) { + ASTNode subQueryAST = subQueries.get(i); + ASTNode originalSubQueryAST = subQueriesInOriginalTree.get(i); + + int sqIdx = qb.incrNumSubQueryPredicates(); + clonedSearchCond = SubQueryUtils.rewriteParentQueryWhere(clonedSearchCond, subQueryAST); + + QBSubQuery subQuery = SubQueryUtils.buildSubQuery(qb.getId(), sqIdx, subQueryAST, + originalSubQueryAST, ctx); + + if (!forHavingClause) { + qb.setWhereClauseSubQueryPredicate(subQuery); + } else { + qb.setHavingClauseSubQueryPredicate(subQuery); + } + String havingInputAlias = null; + + if (forHavingClause) { + havingInputAlias = "gby_sq" + sqIdx; + aliasToRel.put(havingInputAlias, srcRel); + } + + subQuery.validateAndRewriteAST(inputRR, forHavingClause, havingInputAlias, + aliasToRel.keySet()); + + QB qbSQ = new QB(subQuery.getOuterQueryId(), subQuery.getAlias(), true); + qbSQ.setSubQueryDef(subQuery.getSubQuery()); + Phase1Ctx ctx_1 = initPhase1Ctx(); + doPhase1(subQuery.getSubQueryAST(), qbSQ, ctx_1); + getMetaData(qbSQ); + RelNode subQueryRelNode = genLogicalPlan(qbSQ, false); + aliasToRel.put(subQuery.getAlias(), subQueryRelNode); + RowResolver sqRR = relToHiveRR.get(subQueryRelNode); + + /* + * Check.5.h :: For In and Not In the SubQuery must implicitly or + * explicitly only contain one select item. + */ + if (subQuery.getOperator().getType() != SubQueryType.EXISTS + && subQuery.getOperator().getType() != SubQueryType.NOT_EXISTS + && sqRR.getColumnInfos().size() - subQuery.getNumOfCorrelationExprsAddedToSQSelect() > 1) { + throw new SemanticException(ErrorMsg.INVALID_SUBQUERY_EXPRESSION.getMsg(subQueryAST, + "SubQuery can contain only 1 item in Select List.")); + } + + /* + * If this is a Not In SubQuery Predicate then Join in the Null Check + * SubQuery. See QBSubQuery.NotInCheck for details on why and how this + * is constructed. + */ + if (subQuery.getNotInCheck() != null) { + QBSubQuery.NotInCheck notInCheck = subQuery.getNotInCheck(); + notInCheck.setSQRR(sqRR); + QB qbSQ_nic = new QB(subQuery.getOuterQueryId(), notInCheck.getAlias(), true); + qbSQ_nic.setSubQueryDef(notInCheck.getSubQuery()); + ctx_1 = initPhase1Ctx(); + doPhase1(notInCheck.getSubQueryAST(), qbSQ_nic, ctx_1); + getMetaData(qbSQ_nic); + RelNode subQueryNICRelNode = genLogicalPlan(qbSQ_nic, false); + aliasToRel.put(notInCheck.getAlias(), subQueryNICRelNode); + srcRel = genJoinRelNode(srcRel, subQueryNICRelNode, + // set explicitly to inner until we figure out SemiJoin use + // notInCheck.getJoinType(), + JoinType.INNER, notInCheck.getJoinConditionAST()); + inputRR = relToHiveRR.get(srcRel); + if (forHavingClause) { + aliasToRel.put(havingInputAlias, srcRel); + } + } + + /* + * Gen Join between outer Operator and SQ op + */ + subQuery.buildJoinCondition(inputRR, sqRR, forHavingClause, havingInputAlias); + srcRel = genJoinRelNode(srcRel, subQueryRelNode, subQuery.getJoinType(), + subQuery.getJoinConditionAST()); + searchCond = subQuery.updateOuterQueryFilter(clonedSearchCond); + + srcRel = genFilterRelNode(searchCond, srcRel); + + /* + * For Not Exists and Not In, add a projection on top of the Left + * Outer Join. 
+ */
+ if (subQuery.getOperator().getType() == SubQueryType.NOT_EXISTS
+ || subQuery.getOperator().getType() == SubQueryType.NOT_IN) {
+ srcRel = projectLeftOuterSide(srcRel, numSrcColumns);
+ }
+ }
+ relToHiveRR.put(srcRel, outerQBRR);
+ relToHiveColNameOptiqPosMap.put(srcRel, outerQBPosMap);
+ return srcRel;
+ }
+
+ return genFilterRelNode(searchCond, srcRel);
+ }
+
+ private RelNode projectLeftOuterSide(RelNode srcRel, int numColumns) throws SemanticException {
+ RowResolver iRR = relToHiveRR.get(srcRel);
+ RowResolver oRR = new RowResolver();
+ RowResolver.add(oRR, iRR, 0, numColumns);
+
+ List optiqColLst = new ArrayList();
+ List oFieldNames = new ArrayList();
+ RelDataType iType = srcRel.getRowType();
+
+ for (int i = 0; i < iType.getFieldCount(); i++) {
+ RelDataTypeField fType = iType.getFieldList().get(i);
+ String fName = iType.getFieldNames().get(i);
+ optiqColLst.add(cluster.getRexBuilder().makeInputRef(fType.getType(), i));
+ oFieldNames.add(fName);
+ }
+
+ HiveRel selRel = HiveProjectRel.create(srcRel, optiqColLst, oFieldNames);
+
+ this.relToHiveColNameOptiqPosMap.put(selRel, buildHiveToOptiqColumnMap(oRR, selRel));
+ this.relToHiveRR.put(selRel, oRR);
+ return selRel;
+ }
+
+ private RelNode genFilterLogicalPlan(QB qb, RelNode srcRel, Map aliasToRel,
+ boolean forHavingClause) throws SemanticException {
+ RelNode filterRel = null;
+
+ Iterator whereClauseIterator = getQBParseInfo(qb).getDestToWhereExpr().values()
+ .iterator();
+ if (whereClauseIterator.hasNext()) {
+ filterRel = genFilterRelNode(qb, (ASTNode) whereClauseIterator.next().getChild(0), srcRel,
+ aliasToRel, forHavingClause);
+ }
+
+ return filterRel;
+ }
+
+ /**
+ * Class to store GenericUDAF related information.
+ */
+ private class AggInfo {
+ private final List m_aggParams;
+ private final TypeInfo m_returnType;
+ private final String m_udfName;
+ private final boolean m_distinct;
+
+ private AggInfo(List aggParams, TypeInfo returnType, String udfName,
+ boolean isDistinct) {
+ m_aggParams = aggParams;
+ m_returnType = returnType;
+ m_udfName = udfName;
+ m_distinct = isDistinct;
+ }
+ }
+
+ private AggregateCall convertGBAgg(AggInfo agg, RelNode input, List gbChildProjLst,
+ RexNodeConverter converter, HashMap rexNodeToPosMap,
+ Integer childProjLstIndx) throws SemanticException {
+
+ // 1. Get agg fn ret type in Optiq
+ RelDataType aggFnRetType = TypeConverter.convert(agg.m_returnType,
+ this.cluster.getTypeFactory());
+
+ // 2. Convert Agg Fn args and type of args to Optiq
+ // TODO: Does HQL allow expressions as aggregate args or can it only be
+ // projections from child?
+ Integer inputIndx;
+ List argList = new ArrayList();
+ RexNode rexNd = null;
+ RelDataTypeFactory dtFactory = this.cluster.getTypeFactory();
+ ImmutableList.Builder aggArgRelDTBldr = new ImmutableList.Builder();
+ for (ExprNodeDesc expr : agg.m_aggParams) {
+ rexNd = converter.convert(expr);
+ inputIndx = rexNodeToPosMap.get(rexNd.toString());
+ if (inputIndx == null) {
+ gbChildProjLst.add(rexNd);
+ rexNodeToPosMap.put(rexNd.toString(), childProjLstIndx);
+ inputIndx = childProjLstIndx;
+ childProjLstIndx++;
+ }
+ argList.add(inputIndx);
+
+ // TODO: does arg need type cast?
+ aggArgRelDTBldr.add(TypeConverter.convert(expr.getTypeInfo(), dtFactory));
+ }
+
+ // 3.
Get Aggregation FN from Optiq given name, ret type and input arg
+ // type
+ final Aggregation aggregation = SqlFunctionConverter.getOptiqAggFn(agg.m_udfName,
+ aggArgRelDTBldr.build(), aggFnRetType);
+
+ return new AggregateCall(aggregation, agg.m_distinct, argList, aggFnRetType, null);
+ }
+
+ private RelNode genGBRelNode(List gbExprs, List aggInfoLst,
+ RelNode srcRel) throws SemanticException {
+ RowResolver gbInputRR = this.relToHiveRR.get(srcRel);
+ ImmutableMap posMap = this.relToHiveColNameOptiqPosMap.get(srcRel);
+ RexNodeConverter converter = new RexNodeConverter(this.cluster, srcRel.getRowType(),
+ posMap, 0, false);
+
+ final List gbChildProjLst = Lists.newArrayList();
+ final HashMap rexNodeToPosMap = new HashMap();
+ final BitSet groupSet = new BitSet();
+ Integer gbIndx = 0;
+ RexNode rnd;
+ for (ExprNodeDesc key : gbExprs) {
+ rnd = converter.convert(key);
+ gbChildProjLst.add(rnd);
+ groupSet.set(gbIndx);
+ rexNodeToPosMap.put(rnd.toString(), gbIndx);
+ gbIndx++;
+ }
+
+ List aggregateCalls = Lists.newArrayList();
+ int i = aggInfoLst.size();
+ for (AggInfo agg : aggInfoLst) {
+ aggregateCalls.add(convertGBAgg(agg, srcRel, gbChildProjLst, converter, rexNodeToPosMap,
+ gbChildProjLst.size()));
+ }
+
+ if (gbChildProjLst.isEmpty()) {
+ // This will happen for count(*), in such cases we arbitrarily pick
+ // first element from srcRel
+ gbChildProjLst.add(this.cluster.getRexBuilder().makeInputRef(srcRel, 0));
+ }
+ RelNode gbInputRel = HiveProjectRel.create(srcRel, gbChildProjLst, null);
+
+ HiveRel aggregateRel = null;
+ try {
+ aggregateRel = new HiveAggregateRel(cluster, cluster.traitSetOf(HiveRel.CONVENTION),
+ gbInputRel, groupSet, aggregateCalls);
+ } catch (InvalidRelException e) {
+ throw new SemanticException(e);
+ }
+
+ return aggregateRel;
+ }
+
+ private void addAlternateGByKeyMappings(ASTNode gByExpr, ColumnInfo colInfo,
+ RowResolver gByInputRR, RowResolver gByRR) {
+ if (gByExpr.getType() == HiveParser.DOT
+ && gByExpr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL) {
+ String tab_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(0).getChild(0)
+ .getText());
+ String col_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(1).getText());
+ gByRR.put(tab_alias, col_alias, colInfo);
+ } else if (gByExpr.getType() == HiveParser.TOK_TABLE_OR_COL) {
+ String col_alias = BaseSemanticAnalyzer.unescapeIdentifier(gByExpr.getChild(0).getText());
+ String tab_alias = null;
+ /*
+ * If the input to the GBy has a tab alias for the column, then add an
+ * entry based on that tab_alias. For e.g. this query: select b.x,
+ * count(*) from t1 b group by x needs (tab_alias=b, col_alias=x) in the
+ * GBy RR. tab_alias=b comes from looking at the RowResolver that is the
+ * ancestor before any GBy/ReduceSinks added for the GBY operation.
+ */
+ try {
+ ColumnInfo pColInfo = gByInputRR.get(tab_alias, col_alias);
+ tab_alias = pColInfo == null ? null : pColInfo.getTabAlias();
+ } catch (SemanticException se) {
+ }
+ gByRR.put(tab_alias, col_alias, colInfo);
+ }
+ }
+
+ private void addToGBExpr(RowResolver groupByOutputRowResolver,
+ RowResolver groupByInputRowResolver, ASTNode grpbyExpr, ExprNodeDesc grpbyExprNDesc,
+ List gbExprNDescLst, List outputColumnNames) {
+ // TODO: Should we use grpbyExprNDesc.getTypeInfo()?
what if expr is + // UDF + int i = gbExprNDescLst.size(); + String field = getColumnInternalName(i); + outputColumnNames.add(field); + gbExprNDescLst.add(grpbyExprNDesc); + + ColumnInfo oColInfo = new ColumnInfo(field, grpbyExprNDesc.getTypeInfo(), null, false); + groupByOutputRowResolver.putExpression(grpbyExpr, oColInfo); + + addAlternateGByKeyMappings(grpbyExpr, oColInfo, groupByInputRowResolver, + groupByOutputRowResolver); + } + + private AggInfo getHiveAggInfo(ASTNode aggAst, int aggFnLstArgIndx, RowResolver inputRR) + throws SemanticException { + AggInfo aInfo = null; + + // 1 Convert UDAF Params to ExprNodeDesc + ArrayList aggParameters = new ArrayList(); + for (int i = 1; i <= aggFnLstArgIndx; i++) { + ASTNode paraExpr = (ASTNode) aggAst.getChild(i); + ExprNodeDesc paraExprNode = genExprNodeDesc(paraExpr, inputRR); + aggParameters.add(paraExprNode); + } + + // 2. Is this distinct UDAF + boolean isDistinct = aggAst.getType() == HiveParser.TOK_FUNCTIONDI; + + // 3. Determine type of UDAF + TypeInfo udafRetType = null; + + // 3.1 Obtain UDAF name + String aggName = unescapeIdentifier(aggAst.getChild(0).getText()); + + // 3.2 Rank functions type is 'int'/'double' + if (FunctionRegistry.isRankingFunction(aggName)) { + if (aggName.equalsIgnoreCase("percent_rank")) + udafRetType = TypeInfoFactory.doubleTypeInfo; + else + udafRetType = TypeInfoFactory.intTypeInfo; + } else { + // 3.3 Try obtaining UDAF evaluators to determine the ret type + try { + boolean isAllColumns = aggAst.getType() == HiveParser.TOK_FUNCTIONSTAR; + + // 3.3.1 Get UDAF Evaluator + Mode amode = groupByDescModeToUDAFMode(GroupByDesc.Mode.COMPLETE, isDistinct); + + GenericUDAFEvaluator genericUDAFEvaluator = null; + if (aggName.toLowerCase().equals(FunctionRegistry.LEAD_FUNC_NAME) + || aggName.toLowerCase().equals(FunctionRegistry.LAG_FUNC_NAME)) { + ArrayList originalParameterTypeInfos = + getWritableObjectInspector(aggParameters); + genericUDAFEvaluator = + FunctionRegistry.getGenericWindowingEvaluator(aggName, + originalParameterTypeInfos, isDistinct, isAllColumns); + GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters); + udafRetType = ((ListTypeInfo)udaf.returnType).getListElementTypeInfo(); + } else { + genericUDAFEvaluator = getGenericUDAFEvaluator(aggName, + aggParameters, aggAst, isDistinct, isAllColumns); + assert (genericUDAFEvaluator != null); + + // 3.3.2 Get UDAF Info using UDAF Evaluator + GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters); + udafRetType = udaf.returnType; + } + } catch (Exception e) { + LOG.debug("CBO: Couldn't Obtain UDAF evaluators for " + aggName + + ", trying to translate to GenericUDF"); + } + + // 3.4 Try GenericUDF translation + if (udafRetType == null) { + TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); + // We allow stateful functions in the SELECT list (but nowhere else) + tcCtx.setAllowStatefulFunctions(true); + tcCtx.setAllowDistinctFunctions(false); + ExprNodeDesc exp = genExprNodeDesc((ASTNode) aggAst.getChild(0), inputRR, tcCtx); + udafRetType = exp.getTypeInfo(); + } + } + + // 4. Construct AggInfo + aInfo = new AggInfo(aggParameters, udafRetType, aggName, isDistinct); + + return aInfo; + } + + /** + * Generate GB plan. + * + * @param qb + * @param srcRel + * @return TODO: 1. Grouping Sets (roll up..) + * @throws SemanticException + */ + private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException { + RelNode gbRel = null; + QBParseInfo qbp = getQBParseInfo(qb); + + // 0. 
for GSets, Cube, Rollup, bail from Optiq path.
+ if (!qbp.getDestRollups().isEmpty()
+ || !qbp.getDestGroupingSets().isEmpty()
+ || !qbp.getDestCubes().isEmpty()) {
+ String gbyClause = null;
+ HashMap gbysMap = qbp.getDestToGroupBy();
+ if (gbysMap.size() == 1) {
+ ASTNode gbyAST = gbysMap.entrySet().iterator().next().getValue();
+ gbyClause = SemanticAnalyzer.this.ctx.getTokenRewriteStream()
+ .toString(gbyAST.getTokenStartIndex(),
+ gbyAST.getTokenStopIndex());
+ gbyClause = "in '" + gbyClause + "'.";
+ } else {
+ gbyClause = ".";
+ }
+ String msg = String.format("Encountered Grouping Set/Cube/Rollup%s"
+ + " Currently we don't support Grouping Set/Cube/Rollup"
+ + " clauses in CBO," + " turn off cbo for these queries.",
+ gbyClause);
+ LOG.debug(msg);
+ throw new OptiqSemanticException(msg);
+ }
+
+ // 1. Gather GB Expressions (AST) (GB + Aggregations)
+ // NOTE: Multi Insert is not supported
+ String destClauseName = qbp.getClauseNames().iterator().next();
+ List grpByAstExprs = getGroupByForClause(qbp, destClauseName);
+ HashMap aggregationTrees = qbp.getAggregationExprsForClause(destClauseName);
+ boolean hasGrpByAstExprs = (grpByAstExprs != null && !grpByAstExprs.isEmpty()) ? true : false;
+ boolean hasAggregationTrees = (aggregationTrees != null && !aggregationTrees.isEmpty()) ? true
+ : false;
+
+ if (hasGrpByAstExprs || hasAggregationTrees) {
+ ArrayList gbExprNDescLst = new ArrayList();
+ ArrayList outputColumnNames = new ArrayList();
+
+ // 2. Input, Output Row Resolvers
+ RowResolver groupByInputRowResolver = this.relToHiveRR.get(srcRel);
+ RowResolver groupByOutputRowResolver = new RowResolver();
+ groupByOutputRowResolver.setIsExprResolver(true);
+
+ if (hasGrpByAstExprs) {
+ // 3. Construct GB Keys (ExprNode)
+ for (int i = 0; i < grpByAstExprs.size(); ++i) {
+ ASTNode grpbyExpr = grpByAstExprs.get(i);
+ Map astToExprNDescMap = TypeCheckProcFactory.genExprNode(
+ grpbyExpr, new TypeCheckCtx(groupByInputRowResolver));
+ ExprNodeDesc grpbyExprNDesc = astToExprNDescMap.get(grpbyExpr);
+ if (grpbyExprNDesc == null)
+ throw new RuntimeException("Invalid Column Reference: " + grpbyExpr.dump());
+
+ addToGBExpr(groupByOutputRowResolver, groupByInputRowResolver, grpbyExpr,
+ grpbyExprNDesc, gbExprNDescLst, outputColumnNames);
+ }
+ }
+
+ // 4.
Construct aggregation function Info + ArrayList aggregations = new ArrayList(); + if (hasAggregationTrees) { + assert (aggregationTrees != null); + for (ASTNode value : aggregationTrees.values()) { + // 4.1 Determine type of UDAF + // This is the GenericUDAF name + String aggName = unescapeIdentifier(value.getChild(0).getText()); + boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI; + boolean isAllColumns = value.getType() == HiveParser.TOK_FUNCTIONSTAR; + + // 4.2 Convert UDAF Params to ExprNodeDesc + ArrayList aggParameters = new ArrayList(); + for (int i = 1; i < value.getChildCount(); i++) { + ASTNode paraExpr = (ASTNode) value.getChild(i); + ExprNodeDesc paraExprNode = genExprNodeDesc(paraExpr, groupByInputRowResolver); + aggParameters.add(paraExprNode); + } + + Mode amode = groupByDescModeToUDAFMode(GroupByDesc.Mode.COMPLETE, isDistinct); + GenericUDAFEvaluator genericUDAFEvaluator = getGenericUDAFEvaluator(aggName, + aggParameters, value, isDistinct, isAllColumns); + assert (genericUDAFEvaluator != null); + GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters); + AggInfo aInfo = new AggInfo(aggParameters, udaf.returnType, aggName, isDistinct); + aggregations.add(aInfo); + String field = getColumnInternalName(gbExprNDescLst.size() + aggregations.size() - 1); + outputColumnNames.add(field); + groupByOutputRowResolver.putExpression(value, new ColumnInfo(field, aInfo.m_returnType, + "", false)); + } + } + + gbRel = genGBRelNode(gbExprNDescLst, aggregations, srcRel); + relToHiveColNameOptiqPosMap.put(gbRel, + buildHiveToOptiqColumnMap(groupByOutputRowResolver, gbRel)); + this.relToHiveRR.put(gbRel, groupByOutputRowResolver); + } + + return gbRel; + } + + /** + * Generate OB RelNode and input Select RelNode that should be used to + * introduce top constraining Project. If Input select RelNode is not + * present then don't introduce top constraining select. + * + * @param qb + * @param srcRel + * @param outermostOB + * @return Pair Key- OB RelNode, Value - Input Select for + * top constraining Select + * @throws SemanticException + */ + private Pair genOBLogicalPlan(QB qb, RelNode srcRel, boolean outermostOB) + throws SemanticException { + RelNode sortRel = null; + RelNode originalOBChild = null; + + QBParseInfo qbp = getQBParseInfo(qb); + String dest = qbp.getClauseNames().iterator().next(); + ASTNode obAST = qbp.getOrderByForClause(dest); + + if (obAST != null) { + // 1. OB Expr sanity test + // in strict mode, in the presence of order by, limit must be specified + Integer limit = qb.getParseInfo().getDestLimit(dest); + if (conf.getVar(HiveConf.ConfVars.HIVEMAPREDMODE).equalsIgnoreCase("strict") + && limit == null) { + throw new SemanticException(generateErrorMessage(obAST, + ErrorMsg.NO_LIMIT_WITH_ORDERBY.getMsg())); + } + + // 2. 
Walk through OB exprs and extract field collations and additional + // virtual columns needed + final List newVCLst = new ArrayList(); + final List fieldCollations = Lists.newArrayList(); + int fieldIndex = 0; + + List obASTExprLst = obAST.getChildren(); + ASTNode obASTExpr; + List> vcASTTypePairs = new ArrayList>(); + RowResolver inputRR = relToHiveRR.get(srcRel); + RowResolver outputRR = new RowResolver(); + + RexNode rnd; + RexNodeConverter converter = new RexNodeConverter(cluster, srcRel.getRowType(), + relToHiveColNameOptiqPosMap.get(srcRel), 0, false); + int srcRelRecordSz = srcRel.getRowType().getFieldCount(); + + for (int i = 0; i < obASTExprLst.size(); i++) { + // 2.1 Convert AST Expr to ExprNode + obASTExpr = (ASTNode) obASTExprLst.get(i); + Map astToExprNDescMap = TypeCheckProcFactory.genExprNode( + obASTExpr, new TypeCheckCtx(inputRR)); + ExprNodeDesc obExprNDesc = astToExprNDescMap.get(obASTExpr.getChild(0)); + if (obExprNDesc == null) + throw new SemanticException("Invalid order by expression: " + obASTExpr.toString()); + + // 2.2 Convert ExprNode to RexNode + rnd = converter.convert(obExprNDesc); + + // 2.3 Determine the index of ob expr in child schema + // NOTE: Optiq can not take compound exprs in OB without it being + // present in the child (& hence we add a child Project Rel) + if (rnd instanceof RexInputRef) { + fieldIndex = ((RexInputRef) rnd).getIndex(); + } else { + fieldIndex = srcRelRecordSz + newVCLst.size(); + newVCLst.add(rnd); + vcASTTypePairs.add(new Pair((ASTNode) obASTExpr.getChild(0), + obExprNDesc.getTypeInfo())); + } + + // 2.4 Determine the Direction of order by + org.eigenbase.rel.RelFieldCollation.Direction order = RelFieldCollation.Direction.DESCENDING; + if (obASTExpr.getType() == HiveParser.TOK_TABSORTCOLNAMEASC) { + order = RelFieldCollation.Direction.ASCENDING; + } + + // 2.5 Add to field collations + fieldCollations.add(new RelFieldCollation(fieldIndex, order)); + } + + // 3. Add Child Project Rel if needed, Generate Output RR, input Sel Rel + // for top constraining Sel + RelNode obInputRel = srcRel; + if (!newVCLst.isEmpty()) { + List originalInputRefs = Lists.transform(srcRel.getRowType().getFieldList(), + new Function() { + @Override + public RexNode apply(RelDataTypeField input) { + return new RexInputRef(input.getIndex(), input.getType()); + } + }); + RowResolver obSyntheticProjectRR = new RowResolver(); + RowResolver.add(obSyntheticProjectRR, inputRR, 0); + int vcolPos = inputRR.getRowSchema().getSignature().size(); + for (Pair astTypePair : vcASTTypePairs) { + obSyntheticProjectRR.putExpression(astTypePair.getKey(), new ColumnInfo( + getColumnInternalName(vcolPos), astTypePair.getValue(), null, false)); + vcolPos++; + } + obInputRel = genSelectRelNode(CompositeList.of(originalInputRefs, newVCLst), + obSyntheticProjectRR, srcRel); + + if (outermostOB) { + RowResolver.add(outputRR, inputRR, 0); + + } else { + RowResolver.add(outputRR, obSyntheticProjectRR, 0); + originalOBChild = srcRel; + } + } else { + RowResolver.add(outputRR, inputRR, 0); + } + + // 4. Construct SortRel + RelTraitSet traitSet = cluster.traitSetOf(HiveRel.CONVENTION); + RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.of(fieldCollations)); + sortRel = new HiveSortRel(cluster, traitSet, obInputRel, canonizedCollation, null, null); + + // 5. Update the maps + // NOTE: Output RR for SortRel is considered same as its input; we may + // end up not using VC that is present in sort rel. 
Also note that + // rowtype of sortrel is the type of its child; if child happens to be + // synthetic project that we introduced then that projectrel would + // contain the vc. + ImmutableMap hiveColNameOptiqPosMap = buildHiveToOptiqColumnMap(outputRR, + sortRel); + relToHiveRR.put(sortRel, outputRR); + relToHiveColNameOptiqPosMap.put(sortRel, hiveColNameOptiqPosMap); + } + + return (new Pair(sortRel, originalOBChild)); + } + + private RelNode genLimitLogicalPlan(QB qb, RelNode srcRel) throws SemanticException { + HiveRel sortRel = null; + QBParseInfo qbp = getQBParseInfo(qb); + Integer limit = qbp.getDestToLimit().get(qbp.getClauseNames().iterator().next()); + + if (limit != null) { + RexNode fetch = cluster.getRexBuilder().makeExactLiteral(BigDecimal.valueOf(limit)); + RelTraitSet traitSet = cluster.traitSetOf(HiveRel.CONVENTION); + RelCollation canonizedCollation = traitSet.canonize(RelCollationImpl.EMPTY); + sortRel = new HiveSortRel(cluster, traitSet, srcRel, canonizedCollation, null, fetch); + + RowResolver outputRR = new RowResolver(); + RowResolver.add(outputRR, relToHiveRR.get(srcRel), 0); + ImmutableMap hiveColNameOptiqPosMap = buildHiveToOptiqColumnMap(outputRR, + sortRel); + relToHiveRR.put(sortRel, outputRR); + relToHiveColNameOptiqPosMap.put(sortRel, hiveColNameOptiqPosMap); + } + + return sortRel; + } + + List getPartitionKeys(PartitionSpec ps, RexNodeConverter converter, RowResolver inputRR) + throws SemanticException { + List pKeys = new ArrayList(); + if (ps != null) { + List pExprs = ps.getExpressions(); + for (PartitionExpression pExpr : pExprs) { + TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); + tcCtx.setAllowStatefulFunctions(true); + ExprNodeDesc exp = genExprNodeDesc(pExpr.getExpression(), inputRR, tcCtx); + pKeys.add(converter.convert(exp)); + } + } + + return pKeys; + } + + List getOrderKeys(OrderSpec os, RexNodeConverter converter, + RowResolver inputRR) throws SemanticException { + List oKeys = new ArrayList(); + if (os != null) { + List oExprs = os.getExpressions(); + for (OrderExpression oExpr : oExprs) { + TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); + tcCtx.setAllowStatefulFunctions(true); + ExprNodeDesc exp = genExprNodeDesc(oExpr.getExpression(), inputRR, tcCtx); + RexNode ordExp = converter.convert(exp); + Set flags = new HashSet(); + if (oExpr.getOrder() == org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.DESC) + flags.add(SqlKind.DESCENDING); + oKeys.add(new RexFieldCollation(ordExp, flags)); + } + } + + return oKeys; + } + + RexWindowBound getBound(BoundarySpec bs, RexNodeConverter converter) { + RexWindowBound rwb = null; + + if (bs != null) { + SqlNode sn = null; + SqlParserPos pos = new SqlParserPos(1, 1); + SqlNode amt = bs.getAmt() == 0 ?
null : SqlLiteral.createExactNumeric( + String.valueOf(bs.getAmt()), new SqlParserPos(2, 2)); + RexNode amtLiteral = null; + SqlCall sc = null; + RexNode rn = null; + + if (amt != null) + amtLiteral = cluster.getRexBuilder().makeLiteral(new Integer(bs.getAmt()), + cluster.getTypeFactory().createSqlType(SqlTypeName.INTEGER), true); + + switch (bs.getDirection()) { + case PRECEDING: + if (amt == null) { + rwb = RexWindowBound.create(SqlWindow.createUnboundedPreceding(pos), null); + } else { + sc = (SqlCall) SqlWindow.createPreceding(amt, pos); + rwb = RexWindowBound.create(sc, + cluster.getRexBuilder().makeCall(sc.getOperator(), amtLiteral)); + } + break; + + case CURRENT: + rwb = RexWindowBound.create(SqlWindow.createCurrentRow(new SqlParserPos(1, 1)), null); + break; + + case FOLLOWING: + if (amt == null) { + rwb = RexWindowBound.create(SqlWindow.createUnboundedFollowing(new SqlParserPos(1, 1)), + null); + } else { + sc = (SqlCall) SqlWindow.createFollowing(amt, pos); + rwb = RexWindowBound.create(sc, + cluster.getRexBuilder().makeCall(sc.getOperator(), amtLiteral)); + } + break; + } + } + + return rwb; + } + + int getWindowSpecIndx(ASTNode wndAST) { + int wndASTIndx = -1; + int wi = wndAST.getChildCount() - 1; + if (wi <= 0 || (wndAST.getChild(wi).getType() != HiveParser.TOK_WINDOWSPEC)) { + wi = -1; + } + + return wi; + } + + Pair genWindowingProj(QB qb, WindowExpressionSpec wExpSpec, RelNode srcRel) + throws SemanticException { + RexNode w = null; + TypeInfo wHiveRetType = null; + + if (wExpSpec instanceof WindowFunctionSpec) { + WindowFunctionSpec wFnSpec = (WindowFunctionSpec) wExpSpec; + ASTNode windowProjAst = wFnSpec.getExpression(); + // TODO: do we need to get to child? + int wndSpecASTIndx = getWindowSpecIndx(windowProjAst); + // 2. Get Hive Aggregate Info + AggInfo hiveAggInfo = getHiveAggInfo(windowProjAst, wndSpecASTIndx - 1, + this.relToHiveRR.get(srcRel)); + + // 3. Get Optiq Return type for Agg Fn + wHiveRetType = hiveAggInfo.m_returnType; + RelDataType optiqAggFnRetType = TypeConverter.convert(hiveAggInfo.m_returnType, + this.cluster.getTypeFactory()); + + // 4. Convert Agg Fn args to Optiq + ImmutableMap posMap = this.relToHiveColNameOptiqPosMap.get(srcRel); + RexNodeConverter converter = new RexNodeConverter(this.cluster, srcRel.getRowType(), + posMap, 0, false); + Builder optiqAggFnArgsBldr = ImmutableList. builder(); + Builder optiqAggFnArgsTypeBldr = ImmutableList. builder(); + RexNode rexNd = null; + for (int i = 0; i < hiveAggInfo.m_aggParams.size(); i++) { + optiqAggFnArgsBldr.add(converter.convert(hiveAggInfo.m_aggParams.get(i))); + optiqAggFnArgsTypeBldr.add(TypeConverter.convert(hiveAggInfo.m_aggParams.get(i) + .getTypeInfo(), this.cluster.getTypeFactory())); + } + ImmutableList optiqAggFnArgs = optiqAggFnArgsBldr.build(); + ImmutableList optiqAggFnArgsType = optiqAggFnArgsTypeBldr.build(); + + // 5. Get Optiq Agg Fn + final SqlAggFunction optiqAggFn = SqlFunctionConverter.getOptiqAggFn(hiveAggInfo.m_udfName, + optiqAggFnArgsType, optiqAggFnRetType); + + // 6. 
Translate Window spec + RowResolver inputRR = relToHiveRR.get(srcRel); + WindowSpec wndSpec = ((WindowFunctionSpec) wExpSpec).getWindowSpec(); + List partitionKeys = getPartitionKeys(wndSpec.getPartition(), converter, inputRR); + List orderKeys = getOrderKeys(wndSpec.getOrder(), converter, inputRR); + RexWindowBound upperBound = getBound(wndSpec.windowFrame.start, converter); + RexWindowBound lowerBound = getBound(wndSpec.windowFrame.end, converter); + boolean isRows = ((wndSpec.windowFrame.start instanceof RangeBoundarySpec) || (wndSpec.windowFrame.end instanceof RangeBoundarySpec)) ? true + : false; + + w = cluster.getRexBuilder().makeOver(optiqAggFnRetType, optiqAggFn, optiqAggFnArgs, + partitionKeys, ImmutableList. copyOf(orderKeys), lowerBound, + upperBound, isRows, true, false); + } else { + // TODO: Convert to Semantic Exception + throw new RuntimeException("Unsupported window Spec"); + } + + return new Pair(w, wHiveRetType); + } + + private RelNode genSelectForWindowing(QB qb, RelNode srcRel) throws SemanticException { + RelNode selOpForWindow = null; + QBParseInfo qbp = getQBParseInfo(qb); + WindowingSpec wSpec = (!qb.getAllWindowingSpecs().isEmpty()) ? qb.getAllWindowingSpecs() + .values().iterator().next() : null; + + if (wSpec != null) { + // 1. Get valid Window Function Spec + wSpec.validateAndMakeEffective(); + List windowExpressions = wSpec.getWindowExpressions(); + + if (windowExpressions != null && !windowExpressions.isEmpty()) { + RowResolver inputRR = this.relToHiveRR.get(srcRel); + // 2. Get RexNodes for original Projections from below + List projsForWindowSelOp = new ArrayList( + HiveOptiqUtil.getProjsFromBelowAsInputRef(srcRel)); + + // 3. Construct new Row Resolver with everything from below. + RowResolver out_rwsch = new RowResolver(); + RowResolver.add(out_rwsch, inputRR, 0); + + // 4. Walk through Window Expressions & Construct RexNodes for those, + // Update out_rwsch + for (WindowExpressionSpec wExprSpec : windowExpressions) { + if (out_rwsch.getExpression(wExprSpec.getExpression()) == null) { + Pair wtp = genWindowingProj(qb, wExprSpec, srcRel); + projsForWindowSelOp.add(wtp.getKey()); + + // 6.2.2 Update Output Row Schema + ColumnInfo oColInfo = new ColumnInfo( + getColumnInternalName(projsForWindowSelOp.size()), wtp.getValue(), null, false); + if (false) { + out_rwsch.checkColumn(null, wExprSpec.getAlias()); + out_rwsch.put(null, wExprSpec.getAlias(), oColInfo); + } else { + out_rwsch.putExpression(wExprSpec.getExpression(), oColInfo); + } + } + } + + selOpForWindow = genSelectRelNode(projsForWindowSelOp, out_rwsch, srcRel); + } + } + + return selOpForWindow; + } + + private RelNode genSelectRelNode(List optiqColLst, RowResolver out_rwsch, + RelNode srcRel) throws OptiqSemanticException { + // 1. Build Column Names + Set colNamesSet = new HashSet(); + List cInfoLst = out_rwsch.getRowSchema().getSignature(); + ArrayList columnNames = new ArrayList(); + String[] qualifiedColNames; + String tmpColAlias; + for (int i = 0; i < optiqColLst.size(); i++) { + ColumnInfo cInfo = cInfoLst.get(i); + qualifiedColNames = out_rwsch.reverseLookup(cInfo.getInternalName()); + /* + if (qualifiedColNames[0] != null && !qualifiedColNames[0].isEmpty()) + tmpColAlias = qualifiedColNames[0] + "." 
+ qualifiedColNames[1]; + else + */ + tmpColAlias = qualifiedColNames[1]; + + // Prepend column names with '_o_' if it starts with '_c' + /* + * Hive treats names that start with '_c' as internalNames; so change + * the names so we don't run into this issue when converting back to + * Hive AST. + */ + if (tmpColAlias.startsWith("_c")) + tmpColAlias = "_o_" + tmpColAlias; + int suffix = 1; + while (colNamesSet.contains(tmpColAlias)) { + tmpColAlias = qualifiedColNames[1] + suffix; + suffix++; + } + + colNamesSet.add(tmpColAlias); + columnNames.add(tmpColAlias); + } + + // 3. Build Optiq Rel Node for project using converted projections & col + // names + HiveRel selRel = HiveProjectRel.create(srcRel, optiqColLst, columnNames); + + // 4. Keep track of colname-to-posmap && RR for new select + this.relToHiveColNameOptiqPosMap.put(selRel, buildHiveToOptiqColumnMap(out_rwsch, selRel)); + this.relToHiveRR.put(selRel, out_rwsch); + + return selRel; + } + + private RelNode genSelectRelNode(List optiqColLst, RowResolver out_rwsch, + RelNode srcRel, boolean removethismethod) throws OptiqSemanticException { + // 1. Build Column Names + // TODO: Should this be external names + ArrayList columnNames = new ArrayList(); + for (int i = 0; i < optiqColLst.size(); i++) { + columnNames.add(getColumnInternalName(i)); + } + + // 2. Prepend column names with '_o_' + /* + * Hive treats names that start with '_c' as internalNames; so change the + * names so we don't run into this issue when converting back to Hive AST. + */ + List oFieldNames = Lists.transform(columnNames, new Function() { + @Override + public String apply(String hName) { + return "_o_" + hName; + } + }); + + // 3. Build Optiq Rel Node for project using converted projections & col + // names + HiveRel selRel = HiveProjectRel.create(srcRel, optiqColLst, oFieldNames); + + // 4. Keep track of colname-to-posmap && RR for new select + this.relToHiveColNameOptiqPosMap.put(selRel, buildHiveToOptiqColumnMap(out_rwsch, selRel)); + this.relToHiveRR.put(selRel, out_rwsch); + + return selRel; + } + + /** + * NOTE: there can only be one select clause since we don't handle multi + * destination insert. + * + * @throws SemanticException + */ + private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticException { + + // 0. Generate a Select Node for Windowing + RelNode selForWindow = genSelectForWindowing(qb, srcRel); + srcRel = (selForWindow == null) ? srcRel : selForWindow; + + boolean subQuery; + ArrayList col_list = new ArrayList(); + ArrayList> windowingRexNodes = new ArrayList>(); + + // 1. Get Select Expression List + QBParseInfo qbp = getQBParseInfo(qb); + String selClauseName = qbp.getClauseNames().iterator().next(); + ASTNode selExprList = qbp.getSelForClause(selClauseName); + + // 2. Row resolvers for input, output + RowResolver out_rwsch = new RowResolver(); + ASTNode trfm = null; + Integer pos = Integer.valueOf(0); + RowResolver inputRR = this.relToHiveRR.get(srcRel); + + // 3. Query Hints + // TODO: Handle Query Hints; currently we ignore them + boolean selectStar = false; + int posn = 0; + boolean hintPresent = (selExprList.getChild(0).getType() == HiveParser.TOK_HINTLIST); + if (hintPresent) { + String hint = SemanticAnalyzer.this.ctx.getTokenRewriteStream(). + toString( + selExprList.getChild(0).getTokenStartIndex(), + selExprList.getChild(0).getTokenStopIndex()); + String msg = String.format("Hint specified for %s."
+ + " Currently we don't support hints in CBO, turn off cbo to use hints.", hint); + LOG.debug(msg); + throw new OptiqSemanticException(msg); + } + + // 4. Determine if select corresponds to a subquery + subQuery = qb.getParseInfo().getIsSubQ(); + + // 4. Bailout if select involves Transform + boolean isInTransform = (selExprList.getChild(posn).getChild(0).getType() == HiveParser.TOK_TRANSFORM); + if (isInTransform) { + String msg = String.format("SELECT TRANSFORM is currently not supported in CBO," + + " turn off cbo to use TRANSFORM."); + LOG.debug(msg); + throw new OptiqSemanticException(msg); + } + + // 5. Bailout if select involves UDTF + ASTNode udtfExpr = (ASTNode) selExprList.getChild(posn).getChild(0); + GenericUDTF genericUDTF = null; + int udtfExprType = udtfExpr.getType(); + if (udtfExprType == HiveParser.TOK_FUNCTION || udtfExprType == HiveParser.TOK_FUNCTIONSTAR) { + String funcName = TypeCheckProcFactory.DefaultExprProcessor.getFunctionText(udtfExpr, true); + FunctionInfo fi = FunctionRegistry.getFunctionInfo(funcName); + if (fi != null) { + genericUDTF = fi.getGenericUDTF(); + } + if (genericUDTF != null) { + String msg = String.format("UDTF " + funcName + " is currently not supported in CBO," + + " turn off cbo to use UDTF " + funcName); + LOG.debug(msg); + throw new OptiqSemanticException(msg); + } + } + + // 6. Iterate over all expression (after SELECT) + ASTNode exprList = selExprList; + int startPosn = posn; + int wndProjPos = 0; + List tabAliasesForAllProjs = getTabAliases(inputRR); + for (int i = startPosn; i < exprList.getChildCount(); ++i) { + + // 6.1 child can be EXPR AS ALIAS, or EXPR. + ASTNode child = (ASTNode) exprList.getChild(i); + boolean hasAsClause = (!isInTransform) && (child.getChildCount() == 2); + + // 6.2 EXPR AS (ALIAS,...) parses, but is only allowed for UDTF's + // This check is not needed and invalid when there is a transform b/c + // the + // AST's are slightly different. + if (child.getChildCount() > 2) { + throw new SemanticException(generateErrorMessage((ASTNode) child.getChild(2), + ErrorMsg.INVALID_AS.getMsg())); + } + + ASTNode expr; + String tabAlias; + String colAlias; + + // 6.3 Get rid of TOK_SELEXPR + expr = (ASTNode) child.getChild(0); + String[] colRef = getColAlias(child, autogenColAliasPrfxLbl, inputRR, + autogenColAliasPrfxIncludeFuncName, i); + tabAlias = colRef[0]; + colAlias = colRef[1]; + + // 6.4 Build ExprNode corresponding to colums + if (expr.getType() == HiveParser.TOK_ALLCOLREF) { + pos = genColListRegex(".*", + expr.getChildCount() == 0 ? null : getUnescapedName((ASTNode) expr.getChild(0)) + .toLowerCase(), expr, col_list, inputRR, pos, out_rwsch, tabAliasesForAllProjs, + subQuery); + selectStar = true; + } else if (expr.getType() == HiveParser.TOK_TABLE_OR_COL && !hasAsClause + && !inputRR.getIsExprResolver() + && isRegex(unescapeIdentifier(expr.getChild(0).getText()), conf)) { + // In case the expression is a regex COL. 
+ // This can only happen without AS clause + // We don't allow this for ExprResolver - the Group By case + pos = genColListRegex(unescapeIdentifier(expr.getChild(0).getText()), null, expr, + col_list, inputRR, pos, out_rwsch, tabAliasesForAllProjs, subQuery); + } else if (expr.getType() == HiveParser.DOT + && expr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL + && inputRR.hasTableAlias(unescapeIdentifier(expr.getChild(0).getChild(0).getText() + .toLowerCase())) && !hasAsClause && !inputRR.getIsExprResolver() + && isRegex(unescapeIdentifier(expr.getChild(1).getText()), conf)) { + // In case the expression is TABLE.COL (col can be regex). + // This can only happen without AS clause + // We don't allow this for ExprResolver - the Group By case + pos = genColListRegex(unescapeIdentifier(expr.getChild(1).getText()), + unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase()), expr, + col_list, inputRR, pos, out_rwsch, tabAliasesForAllProjs, subQuery); + } else { + // Case when this is an expression + TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); + // We allow stateful functions in the SELECT list (but nowhere else) + tcCtx.setAllowStatefulFunctions(true); + ExprNodeDesc exp = genExprNodeDesc(expr, inputRR, tcCtx); + String recommended = recommendName(exp, colAlias); + if (recommended != null && out_rwsch.get(null, recommended) == null) { + colAlias = recommended; + } + col_list.add(exp); + if (subQuery) { + out_rwsch.checkColumn(tabAlias, colAlias); + } + + ColumnInfo colInfo = new ColumnInfo(getColumnInternalName(pos), + exp.getWritableObjectInspector(), tabAlias, false); + colInfo.setSkewedCol((exp instanceof ExprNodeColumnDesc) ? ((ExprNodeColumnDesc) exp) + .isSkewedCol() : false); + out_rwsch.put(tabAlias, colAlias, colInfo); + + if (exp instanceof ExprNodeColumnDesc) { + ExprNodeColumnDesc colExp = (ExprNodeColumnDesc) exp; + String[] altMapping = inputRR.getAlternateMappings(colExp.getColumn()); + if (altMapping != null) { + out_rwsch.put(altMapping[0], altMapping[1], colInfo); + } + } + + pos = Integer.valueOf(pos.intValue() + 1); + } + } + selectStar = selectStar && exprList.getChildCount() == posn + 1; + + // 7. Convert Hive projections to Optiq + List optiqColLst = new ArrayList(); + RexNodeConverter rexNodeConv = new RexNodeConverter(cluster, srcRel.getRowType(), + buildHiveColNameToInputPosMap(col_list, inputRR), 0, false); + for (ExprNodeDesc colExpr : col_list) { + optiqColLst.add(rexNodeConv.convert(colExpr)); + } + + // 8. 
Build Optiq Rel + RelNode selRel = genSelectRelNode(optiqColLst, out_rwsch, srcRel); + + return selRel; + } + + private RelNode genLogicalPlan(QBExpr qbexpr) throws SemanticException { + if (qbexpr.getOpcode() == QBExpr.Opcode.NULLOP) { + return genLogicalPlan(qbexpr.getQB(), false); + } + if (qbexpr.getOpcode() == QBExpr.Opcode.UNION) { + RelNode qbexpr1Ops = genLogicalPlan(qbexpr.getQBExpr1()); + RelNode qbexpr2Ops = genLogicalPlan(qbexpr.getQBExpr2()); + + return genUnionLogicalPlan(qbexpr.getAlias(), qbexpr.getQBExpr1().getAlias(), qbexpr1Ops, + qbexpr.getQBExpr2().getAlias(), qbexpr2Ops); + } + return null; + } + + private RelNode genLogicalPlan(QB qb, boolean outerMostQB) throws SemanticException { + RelNode srcRel = null; + RelNode filterRel = null; + RelNode gbRel = null; + RelNode gbHavingRel = null; + RelNode havingRel = null; + RelNode selectRel = null; + RelNode obRel = null; + RelNode limitRel = null; + + RelNode rootRel = null; + // First generate all the opInfos for the elements in the from clause + Map aliasToRel = new HashMap(); + + // 0. Check if we can handle the query + // This check is needed here because of SubQuery + if (!canHandleQuery(qb, false)) { + String msg = String.format("CBO Can not handle Sub Query"); + LOG.debug(msg); + throw new OptiqSemanticException(msg); + } + + // 1. Build Rel For Src (SubQuery, TS, Join) + // 1.1. Recurse over the subqueries to fill the subquery part of the plan + for (String subqAlias : qb.getSubqAliases()) { + QBExpr qbexpr = qb.getSubqForAlias(subqAlias); + aliasToRel.put(subqAlias, genLogicalPlan(qbexpr)); + qbexpr.setAlias(subqAlias); + } + + // 1.2 Recurse over all the source tables + for (String tableAlias : qb.getTabAliases()) { + RelNode op = genTableLogicalPlan(tableAlias, qb); + aliasToRel.put(tableAlias, op); + } + + if (aliasToRel.isEmpty()) { + // This may happen for queries like select 1; (no source table) + // We can do the following, which is the same as what Hive does. + // With this, we will be able to generate Optiq plan. + // qb.getMetaData().setSrcForAlias(DUMMY_TABLE, getDummyTable()); + // RelNode op = genTableLogicalPlan(DUMMY_TABLE, qb); + // qb.addAlias(DUMMY_TABLE); + // qb.setTabAlias(DUMMY_TABLE, DUMMY_TABLE); + // aliasToRel.put(DUMMY_TABLE, op); + // However, Hive trips later while trying to get Metadata for this dummy + // table + // So, for now let's just disable this. Anyway there is nothing much to + // optimize in such cases. + throw new OptiqSemanticException("Unsupported"); + + } + // 1.3 process join + if (qb.getParseInfo().getJoinExpr() != null) { + srcRel = genJoinLogicalPlan(qb.getParseInfo().getJoinExpr(), aliasToRel); + } else { + // If no join then there should only be either 1 TS or 1 SubQuery + srcRel = aliasToRel.values().iterator().next(); + } + + // 2. Build Rel for where Clause + filterRel = genFilterLogicalPlan(qb, srcRel, aliasToRel, false); + srcRel = (filterRel == null) ? srcRel : filterRel; + + // 3. Build Rel for GB Clause + gbRel = genGBLogicalPlan(qb, srcRel); + srcRel = (gbRel == null) ? srcRel : gbRel; + + // 4. Build Rel for GB Having Clause + gbHavingRel = genGBHavingLogicalPlan(qb, srcRel, aliasToRel); + srcRel = (gbHavingRel == null) ? srcRel : gbHavingRel; + + // 5. Build Rel for Select Clause + selectRel = genSelectLogicalPlan(qb, srcRel); + srcRel = (selectRel == null) ? srcRel : selectRel; + + // 6.
Build Rel for OB Clause + Pair obTopProjPair = genOBLogicalPlan(qb, srcRel, outerMostQB); + obRel = obTopProjPair.getKey(); + RelNode topConstrainingProjArgsRel = obTopProjPair.getValue(); + srcRel = (obRel == null) ? srcRel : obRel; + + // 7. Build Rel for Limit Clause + limitRel = genLimitLogicalPlan(qb, srcRel); + srcRel = (limitRel == null) ? srcRel : limitRel; + + // 8. Introduce top constraining select if needed. + // NOTES: + // 1. Optiq can not take an expr in OB; hence it needs to be added as VC + // in the input select; In such cases we need to introduce a select on top + // to ensure VC is not visible beyond Limit, OB. + // 2. Hive can not preserve order across select. In subqueries OB is used + // to get a deterministic set of tuples from following limit. Hence we + // introduce the constraining select above Limit (if present) instead of + // OB. + // 3. The top level OB will not introduce constraining select due to Hive + // limitation (#2) stated above. The RR for OB will not include VC. Thus + // Result Schema will not include exprs used by top OB. During AST Conv, + // in the PlanModifierForASTConv we would modify the top level OB to + // migrate exprs from input sel to SortRel (Note that Optiq doesn't + // support this; but since we are done with Optiq at this point it's OK). + if (topConstrainingProjArgsRel != null) { + List originalInputRefs = Lists.transform(topConstrainingProjArgsRel.getRowType() + .getFieldList(), new Function() { + @Override + public RexNode apply(RelDataTypeField input) { + return new RexInputRef(input.getIndex(), input.getType()); + } + }); + RowResolver topConstrainingProjRR = new RowResolver(); + RowResolver.add(topConstrainingProjRR, this.relToHiveRR.get(topConstrainingProjArgsRel), 0); + srcRel = genSelectRelNode(originalInputRefs, topConstrainingProjRR, srcRel); + } + + // 9. In case this QB corresponds to a subquery then modify its RR to point + // to the subquery alias + // TODO: clean this up + if (qb.getParseInfo().getAlias() != null) { + RowResolver rr = this.relToHiveRR.get(srcRel); + RowResolver newRR = new RowResolver(); + String alias = qb.getParseInfo().getAlias(); + for (ColumnInfo colInfo : rr.getColumnInfos()) { + String name = colInfo.getInternalName(); + String[] tmp = rr.reverseLookup(name); + if ("".equals(tmp[0]) || tmp[1] == null) { + // ast expression is not a valid column name for table + tmp[1] = colInfo.getInternalName(); + } + ColumnInfo newCi = new ColumnInfo(colInfo); + newCi.setTabAlias(alias); + newRR.put(alias, tmp[1], newCi); + } + relToHiveRR.put(srcRel, newRR); + relToHiveColNameOptiqPosMap.put(srcRel, buildHiveToOptiqColumnMap(newRR, srcRel)); + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Created Plan for Query Block " + qb.getId()); + } + + return srcRel; + } + + private RelNode genGBHavingLogicalPlan(QB qb, RelNode srcRel, Map aliasToRel) + throws SemanticException { + RelNode gbFilter = null; + QBParseInfo qbp = getQBParseInfo(qb); + ASTNode havingClause = qbp.getHavingForClause(qbp.getClauseNames().iterator().next()); + + if (havingClause != null) { + validateNoHavingReferenceToAlias(qb, (ASTNode) havingClause.getChild(0)); + gbFilter = genFilterRelNode(qb, (ASTNode) havingClause.getChild(0), srcRel, aliasToRel, + true); + } + + return gbFilter; + } + + /* + * Bail if having clause uses Select Expression aliases for Aggregation + * expressions. We could do what Hive does. But this is non-standard + * behavior. Making sure this doesn't cause issues when translating through + * Optiq is not worth it.
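The validation described here boils down to scanning the HAVING expression tree for bare identifiers that match a SELECT-list alias. A minimal sketch of that scan, where Node is a hypothetical stand-in for the ANTLR ASTNode walked by TreeVisitor below:

import java.util.Arrays;
import java.util.List;

public class HavingAliasScan {
  static class Node {
    final String text;
    final List<Node> children;
    Node(String text, Node... children) {
      this.text = text;
      this.children = Arrays.asList(children);
    }
  }

  // True if any leaf identifier in the tree equals the SELECT-list alias.
  static boolean referencesAlias(Node n, String alias) {
    if (n.children.isEmpty() && n.text.equals(alias)) {
      return true;
    }
    for (Node c : n.children) {
      if (referencesAlias(c, alias)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    // SELECT sum(x) AS s ... HAVING s > 10: the HAVING tree references alias "s"
    Node having = new Node(">", new Node("s"), new Node("10"));
    System.out.println(referencesAlias(having, "s")); // true -> bail out under CBO
  }
}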
*/ + private void validateNoHavingReferenceToAlias(QB qb, ASTNode havingExpr) + throws OptiqSemanticException { + + QBParseInfo qbPI = qb.getParseInfo(); + Map exprToAlias = qbPI.getAllExprToColumnAlias(); + /* + * a mouthful, but safe: + * - a QB is guaranteed to have at least 1 destination + * - we don't support multi insert, so picking the first dest. + */ + Set aggExprs = qbPI.getDestToAggregationExprs().values() + .iterator().next().keySet(); + + for (Map.Entry selExpr : exprToAlias.entrySet()) { + ASTNode selAST = selExpr.getKey(); + if (!aggExprs.contains(selAST.toStringTree().toLowerCase())) { + continue; + } + final String aliasToCheck = selExpr.getValue(); + final Set aliasReferences = new HashSet(); + TreeVisitorAction action = new TreeVisitorAction() { + + @Override + public Object pre(Object t) { + if (ParseDriver.adaptor.getType(t) == HiveParser.TOK_TABLE_OR_COL) { + Object c = ParseDriver.adaptor.getChild(t, 0); + if (c != null + && ParseDriver.adaptor.getType(c) == HiveParser.Identifier + && ParseDriver.adaptor.getText(c).equals(aliasToCheck)) { + aliasReferences.add(t); + } + } + return t; + } + + @Override + public Object post(Object t) { + return t; + } + }; + new TreeVisitor(ParseDriver.adaptor).visit(havingExpr, action); + + if (aliasReferences.size() > 0) { + String havingClause = SemanticAnalyzer.this.ctx + .getTokenRewriteStream().toString( + havingExpr.getTokenStartIndex(), + havingExpr.getTokenStopIndex()); + String msg = String.format( + "Encountered Select alias '%s' in having clause '%s'." + + " This non-standard behavior is not supported with cbo on." + + " Turn off cbo for these queries.", aliasToCheck, + havingClause); + LOG.debug(msg); + throw new OptiqSemanticException(msg); + } + } + + } + + private ImmutableMap buildHiveToOptiqColumnMap(RowResolver rr, RelNode rNode) { + ImmutableMap.Builder b = new ImmutableMap.Builder(); + int i = 0; + for (ColumnInfo ci : rr.getRowSchema().getSignature()) { + b.put(ci.getInternalName(), rr.getPosition(ci.getInternalName())); + } + return b.build(); + } + + private ImmutableMap buildHiveColNameToInputPosMap( + List col_list, RowResolver inputRR) { + // Build a map of Hive column Names (ExprNodeColumnDesc Name) + // to the positions of those projections in the input + Map hashCodeTocolumnDescMap = new HashMap(); + ExprNodeDescUtils.getExprNodeColumnDesc(col_list, hashCodeTocolumnDescMap); + ImmutableMap.Builder hiveColNameToInputPosMapBuilder = new ImmutableMap.Builder(); + String exprNodecolName; + for (ExprNodeDesc exprDesc : hashCodeTocolumnDescMap.values()) { + exprNodecolName = ((ExprNodeColumnDesc) exprDesc).getColumn(); + hiveColNameToInputPosMapBuilder.put(exprNodecolName, inputRR.getPosition(exprNodecolName)); + } + + return hiveColNameToInputPosMapBuilder.build(); + } + + private QBParseInfo getQBParseInfo(QB qb) throws OptiqSemanticException { + QBParseInfo qbp = qb.getParseInfo(); + if (qbp.getClauseNames().size() > 1) { + String msg = String.format("Multi Insert is currently not supported in CBO," + + " turn off cbo to use Multi Insert."); + LOG.debug(msg); + throw new OptiqSemanticException(msg); + } + return qbp; + } + + private List getTabAliases(RowResolver inputRR) { + List tabAliases = new ArrayList(); + for (ColumnInfo ci : inputRR.getColumnInfos()) { + tabAliases.add(ci.getTabAlias()); + } + + return tabAliases; + } + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 0f714b5..23fbbe1 100644 ---
a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -26,6 +26,8 @@ import java.util.List; import java.util.Set; +import com.google.common.collect.Interner; +import com.google.common.collect.Interners; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; @@ -278,6 +280,11 @@ public void compile(final ParseContext pCtx, final List interner = Interners.newStrongInterner(); + for (Task rootTask : rootTasks) { + GenMapRedUtils.internTableDesc(rootTask, interner); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java index 15b369b..3ef5189 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java @@ -36,7 +36,9 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator; +import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator; import org.apache.hadoop.hive.ql.exec.ConditionalTask; +import org.apache.hadoop.hive.ql.exec.DummyStoreOperator; import org.apache.hadoop.hive.ql.exec.FileSinkOperator; import org.apache.hadoop.hive.ql.exec.FilterOperator; import org.apache.hadoop.hive.ql.exec.JoinOperator; @@ -62,6 +64,7 @@ import org.apache.hadoop.hive.ql.optimizer.ConstantPropagate; import org.apache.hadoop.hive.ql.optimizer.ConvertJoinMapJoin; import org.apache.hadoop.hive.ql.optimizer.DynamicPartitionPruningOptimization; +import org.apache.hadoop.hive.ql.optimizer.MergeJoinProc; import org.apache.hadoop.hive.ql.optimizer.ReduceSinkMapJoinProc; import org.apache.hadoop.hive.ql.optimizer.RemoveDynamicPruningBySize; import org.apache.hadoop.hive.ql.optimizer.SetReducerParallelism; @@ -330,10 +333,17 @@ protected void generateTaskTree(List> rootTasks, Pa opRules.put(new RuleRegExp("No more walking on ReduceSink-MapJoin", MapJoinOperator.getOperatorName() + "%"), new ReduceSinkMapJoinProc()); + opRules.put(new RuleRegExp("Recognize a Sorted Merge Join operator to set up the right edge and" + + " stop traversing the DummyStore-MapJoin", CommonMergeJoinOperator.getOperatorName() + + "%"), new MergeJoinProc()); + opRules.put(new RuleRegExp("Split Work + Move/Merge - FileSink", FileSinkOperator.getOperatorName() + "%"), new CompositeProcessor(new FileSinkProcessor(), genTezWork)); + opRules.put(new RuleRegExp("Split work - DummyStore", DummyStoreOperator.getOperatorName() + + "%"), genTezWork); + opRules.put(new RuleRegExp("Handle Potential Analyze Command", TableScanOperator.getOperatorName() + "%"), new ProcessAnalyzeTable(GenTezUtils.getUtils())); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java index a95ae20..3b6178f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java @@ -54,6 +54,19 @@ private boolean allowDistinctFunctions; + private final boolean allowGBExprElimination; + + private final boolean allowAllColRef; + + private final boolean allowFunctionStar; + + private final boolean allowWindowing; + + // "[]" : LSQUARE/INDEX Expression + private final boolean allowIndexExpr; + + private final boolean allowSubQueryExpr; + /** * Constructor.
* @@ -61,10 +74,23 @@ * The input row resolver of the previous operator. */ public TypeCheckCtx(RowResolver inputRR) { + this(inputRR, false, true, true, true, true, true, true, true); + } + + public TypeCheckCtx(RowResolver inputRR, boolean allowStatefulFunctions, + boolean allowDistinctFunctions, boolean allowGBExprElimination, boolean allowAllColRef, + boolean allowFunctionStar, boolean allowWindowing, + boolean allowIndexExpr, boolean allowSubQueryExpr) { setInputRR(inputRR); error = null; - allowStatefulFunctions = false; - allowDistinctFunctions = true; + this.allowStatefulFunctions = allowStatefulFunctions; + this.allowDistinctFunctions = allowDistinctFunctions; + this.allowGBExprElimination = allowGBExprElimination; + this.allowAllColRef = allowAllColRef; + this.allowFunctionStar = allowFunctionStar; + this.allowWindowing = allowWindowing; + this.allowIndexExpr = allowIndexExpr; + this.allowSubQueryExpr = allowSubQueryExpr; } /** @@ -98,7 +124,8 @@ public UnparseTranslator getUnparseTranslator() { } /** - * @param allowStatefulFunctions whether to allow stateful UDF invocations + * @param allowStatefulFunctions + * whether to allow stateful UDF invocations */ public void setAllowStatefulFunctions(boolean allowStatefulFunctions) { this.allowStatefulFunctions = allowStatefulFunctions; @@ -136,7 +163,31 @@ public void setAllowDistinctFunctions(boolean allowDistinctFunctions) { this.allowDistinctFunctions = allowDistinctFunctions; } - public boolean isAllowDistinctFunctions() { + public boolean getAllowDistinctFunctions() { return allowDistinctFunctions; } + + public boolean getAllowGBExprElimination() { + return allowGBExprElimination; + } + + public boolean getallowAllColRef() { + return allowAllColRef; + } + + public boolean getallowFunctionStar() { + return allowFunctionStar; + } + + public boolean getallowWindowing() { + return allowWindowing; + } + + public boolean getallowIndexExpr() { + return allowIndexExpr; + } + + public boolean getallowSubQueryExpr() { + return allowSubQueryExpr; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java index 5c5589a..e065983 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java @@ -80,12 +80,12 @@ * expression Node Descriptor trees. They also introduce the correct conversion * functions to do proper implicit conversion. 
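The constructor change above follows a simple gating pattern: the legacy single-argument constructor keeps permissive defaults, while restricted callers such as the CBO path pass explicit switches. A minimal sketch with a hypothetical Ctx class, not the actual TypeCheckCtx:

public class CtxFlagsSketch {
  static class Ctx {
    final boolean allowWindowing;
    final boolean allowSubQueryExpr;

    Ctx() {
      this(true, true); // legacy behavior: everything allowed
    }

    Ctx(boolean allowWindowing, boolean allowSubQueryExpr) {
      this.allowWindowing = allowWindowing;
      this.allowSubQueryExpr = allowSubQueryExpr;
    }
  }

  // A processor consults the context before accepting a construct.
  static void checkWindowing(Ctx ctx) {
    if (!ctx.allowWindowing) {
      throw new IllegalStateException("Windowing is not supported in the context");
    }
  }

  public static void main(String[] args) {
    checkWindowing(new Ctx());              // default context: accepted
    try {
      checkWindowing(new Ctx(false, true)); // restricted context: rejected
    } catch (IllegalStateException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}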
*/ -public final class TypeCheckProcFactory { +public class TypeCheckProcFactory { protected static final Log LOG = LogFactory.getLog(TypeCheckProcFactory.class .getName()); - private TypeCheckProcFactory() { + protected TypeCheckProcFactory() { // prevent instantiation } @@ -118,7 +118,7 @@ public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) RowResolver input = ctx.getInputRR(); ExprNodeDesc desc = null; - if ((ctx == null) || (input == null)) { + if ((ctx == null) || (input == null) || (!ctx.getAllowGBExprElimination())) { return null; } @@ -137,8 +137,13 @@ public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) return desc; } - public static Map genExprNode(ASTNode expr, - TypeCheckCtx tcCtx) throws SemanticException { + public static Map genExprNode(ASTNode expr, TypeCheckCtx tcCtx) + throws SemanticException { + return genExprNode(expr, tcCtx, new TypeCheckProcFactory()); + } + + protected static Map genExprNode(ASTNode expr, + TypeCheckCtx tcCtx, TypeCheckProcFactory tf) throws SemanticException { // Create the walker, the rules dispatcher and the context. // create a walker which walks the tree in a DFS manner while maintaining // the operator stack. The dispatcher @@ -146,13 +151,13 @@ public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) Map opRules = new LinkedHashMap(); opRules.put(new RuleRegExp("R1", HiveParser.TOK_NULL + "%"), - getNullExprProcessor()); + tf.getNullExprProcessor()); opRules.put(new RuleRegExp("R2", HiveParser.Number + "%|" + HiveParser.TinyintLiteral + "%|" + HiveParser.SmallintLiteral + "%|" + HiveParser.BigintLiteral + "%|" + HiveParser.DecimalLiteral + "%"), - getNumExprProcessor()); + tf.getNumExprProcessor()); opRules .put(new RuleRegExp("R3", HiveParser.Identifier + "%|" + HiveParser.StringLiteral + "%|" + HiveParser.TOK_CHARSETLITERAL + "%|" @@ -162,18 +167,18 @@ public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) + HiveParser.KW_ARRAY + "%|" + HiveParser.KW_MAP + "%|" + HiveParser.KW_STRUCT + "%|" + HiveParser.KW_EXISTS + "%|" + HiveParser.TOK_SUBQUERY_OP_NOTIN + "%"), - getStrExprProcessor()); + tf.getStrExprProcessor()); opRules.put(new RuleRegExp("R4", HiveParser.KW_TRUE + "%|" - + HiveParser.KW_FALSE + "%"), getBoolExprProcessor()); - opRules.put(new RuleRegExp("R5", HiveParser.TOK_DATELITERAL + "%"), getDateExprProcessor()); + + HiveParser.KW_FALSE + "%"), tf.getBoolExprProcessor()); + opRules.put(new RuleRegExp("R5", HiveParser.TOK_DATELITERAL + "%"), tf.getDateExprProcessor()); opRules.put(new RuleRegExp("R6", HiveParser.TOK_TABLE_OR_COL + "%"), - getColumnExprProcessor()); + tf.getColumnExprProcessor()); opRules.put(new RuleRegExp("R7", HiveParser.TOK_SUBQUERY_OP + "%"), - getSubQueryExprProcessor()); + tf.getSubQueryExprProcessor()); // The dispatcher fires the processor corresponding to the closest matching // rule and passes the context along - Dispatcher disp = new DefaultRuleDispatcher(getDefaultExprProcessor(), + Dispatcher disp = new DefaultRuleDispatcher(tf.getDefaultExprProcessor(), opRules, tcCtx); GraphWalker ogw = new DefaultGraphWalker(disp); @@ -229,7 +234,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * * @return NullExprProcessor. */ - public static NullExprProcessor getNullExprProcessor() { + public NullExprProcessor getNullExprProcessor() { return new NullExprProcessor(); } @@ -304,7 +309,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * * @return NumExprProcessor. 
*/ - public static NumExprProcessor getNumExprProcessor() { + public NumExprProcessor getNumExprProcessor() { return new NumExprProcessor(); } @@ -362,7 +367,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * * @return StrExprProcessor. */ - public static StrExprProcessor getStrExprProcessor() { + public StrExprProcessor getStrExprProcessor() { return new StrExprProcessor(); } @@ -408,7 +413,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * * @return BoolExprProcessor. */ - public static BoolExprProcessor getBoolExprProcessor() { + public BoolExprProcessor getBoolExprProcessor() { return new BoolExprProcessor(); } @@ -449,7 +454,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * * @return DateExprProcessor. */ - public static DateExprProcessor getDateExprProcessor() { + public DateExprProcessor getDateExprProcessor() { return new DateExprProcessor(); } @@ -546,7 +551,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * * @return ColumnExprProcessor. */ - public static ColumnExprProcessor getColumnExprProcessor() { + public ColumnExprProcessor getColumnExprProcessor() { return new ColumnExprProcessor(); } @@ -613,7 +618,7 @@ public static ColumnExprProcessor getColumnExprProcessor() { windowingTokens.add(HiveParser.TOK_TABSORTCOLNAMEDESC); } - private static boolean isRedundantConversionFunction(ASTNode expr, + protected static boolean isRedundantConversionFunction(ASTNode expr, boolean isFunction, ArrayList children) { if (!isFunction) { return false; @@ -700,7 +705,30 @@ public static ExprNodeDesc getFuncExprNodeDesc(String udfName, return getFuncExprNodeDescWithUdfData(udfName, null, children); } - static ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr, + protected void validateUDF(ASTNode expr, boolean isFunction, TypeCheckCtx ctx, FunctionInfo fi, + List children, GenericUDF genericUDF) throws SemanticException { + // Detect UDTF's in nested SELECT, GROUP BY, etc. as they aren't + // supported + if (fi.getGenericUDTF() != null) { + throw new SemanticException(ErrorMsg.UDTF_INVALID_LOCATION.getMsg()); + } + // UDAF in filter condition, group-by clause, param of function, etc. + if (fi.getGenericUDAFResolver() != null) { + if (isFunction) { + throw new SemanticException(ErrorMsg.UDAF_INVALID_LOCATION.getMsg((ASTNode) expr + .getChild(0))); + } else { + throw new SemanticException(ErrorMsg.UDAF_INVALID_LOCATION.getMsg(expr)); + } + } + if (!ctx.getAllowStatefulFunctions() && (genericUDF != null)) { + if (FunctionRegistry.isStateful(genericUDF)) { + throw new SemanticException(ErrorMsg.UDF_STATEFUL_INVALID_LOCATION.getMsg()); + } + } + } + + protected ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr, boolean isFunction, ArrayList children, TypeCheckCtx ctx) throws SemanticException, UDFArgumentException { // return the child directly if the conversion is redundant. @@ -713,6 +741,7 @@ static ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr, ExprNodeDesc desc; if (funcText.equals(".")) { // "."
: FIELD Expression + assert (children.size() == 2); // Only allow constant field name for now assert (children.get(1) instanceof ExprNodeConstantDesc); @@ -727,23 +756,22 @@ static ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr, // Allow accessing a field of list element structs directly from a list boolean isList = (object.getTypeInfo().getCategory() == ObjectInspector.Category.LIST); if (isList) { - objectTypeInfo = ((ListTypeInfo) objectTypeInfo) - .getListElementTypeInfo(); + objectTypeInfo = ((ListTypeInfo) objectTypeInfo).getListElementTypeInfo(); } if (objectTypeInfo.getCategory() != Category.STRUCT) { throw new SemanticException(ErrorMsg.INVALID_DOT.getMsg(expr)); } - TypeInfo t = ((StructTypeInfo) objectTypeInfo) - .getStructFieldTypeInfo(fieldNameString); + TypeInfo t = ((StructTypeInfo) objectTypeInfo).getStructFieldTypeInfo(fieldNameString); if (isList) { t = TypeInfoFactory.getListTypeInfo(t); } - desc = new ExprNodeFieldDesc(t, children.get(0), fieldNameString, - isList); - + desc = new ExprNodeFieldDesc(t, children.get(0), fieldNameString, isList); } else if (funcText.equals("[")) { // "[]" : LSQUARE/INDEX Expression + if (!ctx.getallowIndexExpr()) + throw new SemanticException(ErrorMsg.INVALID_FUNCTION.getMsg(expr)); + assert (children.size() == 2); // Check whether this is a list or a map @@ -759,8 +787,7 @@ static ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr, // Calculate TypeInfo TypeInfo t = ((ListTypeInfo) myt).getListElementTypeInfo(); - desc = new ExprNodeGenericFuncDesc(t, FunctionRegistry - .getGenericUDFForIndex(), children); + desc = new ExprNodeGenericFuncDesc(t, FunctionRegistry.getGenericUDFForIndex(), children); } else if (myt.getCategory() == Category.MAP) { if (!FunctionRegistry.implicitConvertible(children.get(1).getTypeInfo(), ((MapTypeInfo) myt).getMapKeyTypeInfo())) { @@ -769,11 +796,9 @@ static ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr, } // Calculate TypeInfo TypeInfo t = ((MapTypeInfo) myt).getMapValueTypeInfo(); - desc = new ExprNodeGenericFuncDesc(t, FunctionRegistry - .getGenericUDFForIndex(), children); + desc = new ExprNodeGenericFuncDesc(t, FunctionRegistry.getGenericUDFForIndex(), children); } else { - throw new SemanticException(ErrorMsg.NON_COLLECTION_TYPE.getMsg(expr, - myt.getTypeName())); + throw new SemanticException(ErrorMsg.NON_COLLECTION_TYPE.getMsg(expr, myt.getTypeName())); } } else { // other operators or functions @@ -825,26 +850,7 @@ static ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr, } } - // Detect UDTF's in nested SELECT, GROUP BY, etc as they aren't - // supported - if (fi.getGenericUDTF() != null) { - throw new SemanticException(ErrorMsg.UDTF_INVALID_LOCATION.getMsg()); - } - // UDAF in filter condition, group-by caluse, param of funtion, etc. - if (fi.getGenericUDAFResolver() != null) { - if (isFunction) { - throw new SemanticException(ErrorMsg.UDAF_INVALID_LOCATION. 
- getMsg((ASTNode) expr.getChild(0))); - } else { - throw new SemanticException(ErrorMsg.UDAF_INVALID_LOCATION.getMsg(expr)); - } - } - if (!ctx.getAllowStatefulFunctions() && (genericUDF != null)) { - if (FunctionRegistry.isStateful(genericUDF)) { - throw new SemanticException( - ErrorMsg.UDF_STATEFUL_INVALID_LOCATION.getMsg()); - } - } + validateUDF(expr, isFunction, ctx, fi, children, genericUDF); // Try to infer the type of the constant only if there are two // nodes, one of them is column and the other is numeric const @@ -955,6 +961,24 @@ private boolean isDescendant(Node ans, Node des) { return false; } + protected ExprNodeColumnDesc processQualifiedColRef(TypeCheckCtx ctx, ASTNode expr, + Object... nodeOutputs) throws SemanticException { + RowResolver input = ctx.getInputRR(); + String tableAlias = BaseSemanticAnalyzer.unescapeIdentifier(expr.getChild(0).getChild(0) + .getText()); + // NOTE: tableAlias must be a valid non-ambiguous table alias, + // because we've checked that in TOK_TABLE_OR_COL's process method. + ColumnInfo colInfo = input.get(tableAlias, ((ExprNodeConstantDesc) nodeOutputs[1]).getValue() + .toString()); + + if (colInfo == null) { + ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr.getChild(1)), expr); + return null; + } + return new ExprNodeColumnDesc(colInfo.getType(), colInfo.getInternalName(), + colInfo.getTabAlias(), colInfo.getIsVirtualCol()); + } + @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { @@ -1004,7 +1028,11 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * The difference is that there is translation for Window related tokens, so we just * return null; */ - if ( windowingTokens.contains(expr.getType())) { + if (windowingTokens.contains(expr.getType())) { + if (!ctx.getallowWindowing()) + throw new SemanticException(SemanticAnalyzer.generateErrorMessage(expr, + ErrorMsg.INVALID_FUNCTION.getMsg("Windowing is not supported in the context"))); + return null; } @@ -1013,6 +1041,11 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } if (expr.getType() == HiveParser.TOK_ALLCOLREF) { + if (!ctx.getallowAllColRef()) + throw new SemanticException(SemanticAnalyzer.generateErrorMessage(expr, + ErrorMsg.INVALID_COLUMN + .getMsg("All column reference is not supported in the context"))); + RowResolver input = ctx.getInputRR(); ExprNodeColumnListDesc columnList = new ExprNodeColumnListDesc(); assert expr.getChildCount() <= 1; @@ -1050,22 +1083,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, if (expr.getType() == HiveParser.DOT && expr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL && nodeOutputs[0] == null) { - - RowResolver input = ctx.getInputRR(); - String tableAlias = BaseSemanticAnalyzer.unescapeIdentifier(expr - .getChild(0).getChild(0).getText()); - // NOTE: tableAlias must be a valid non-ambiguous table alias, - // because we've checked that in TOK_TABLE_OR_COL's process method. 
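What the new processQualifiedColRef amounts to: resolve an "alias.col" reference against a two-level lookup and raise a semantic error when the column is unknown. A minimal sketch in which a nested Map stands in for Hive's RowResolver:

import java.util.HashMap;
import java.util.Map;

public class QualifiedColSketch {
  // Maps table alias -> (column alias -> internal column name).
  static String resolve(Map<String, Map<String, String>> rr, String tab, String col) {
    Map<String, String> cols = rr.get(tab);
    String internal = (cols == null) ? null : cols.get(col);
    if (internal == null) {
      throw new IllegalArgumentException("Invalid column reference: " + tab + "." + col);
    }
    return internal; // internal name used to build the column descriptor
  }

  public static void main(String[] args) {
    Map<String, Map<String, String>> rr = new HashMap<String, Map<String, String>>();
    Map<String, String> srcCols = new HashMap<String, String>();
    srcCols.put("key", "_col0");
    rr.put("src", srcCols);
    System.out.println(resolve(rr, "src", "key")); // prints _col0
  }
}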
- ColumnInfo colInfo = input.get(tableAlias, - ((ExprNodeConstantDesc) nodeOutputs[1]).getValue().toString()); - - if (colInfo == null) { - ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr.getChild(1)), expr); - return null; - } - return new ExprNodeColumnDesc(colInfo.getType(), colInfo - .getInternalName(), colInfo.getTabAlias(), colInfo - .getIsVirtualCol()); + return processQualifiedColRef(ctx, expr, nodeOutputs); } // Return nulls for conversion operators @@ -1080,7 +1098,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, expr.getType() == HiveParser.TOK_FUNCTIONSTAR || expr.getType() == HiveParser.TOK_FUNCTIONDI); - if (!ctx.isAllowDistinctFunctions() && expr.getType() == HiveParser.TOK_FUNCTIONDI) { + if (!ctx.getAllowDistinctFunctions() && expr.getType() == HiveParser.TOK_FUNCTIONDI) { throw new SemanticException( SemanticAnalyzer.generateErrorMessage(expr, ErrorMsg.DISTINCT_NOT_SUPPORTED.getMsg())); } @@ -1099,6 +1117,11 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } if (expr.getType() == HiveParser.TOK_FUNCTIONSTAR) { + if (!ctx.getallowFunctionStar()) + throw new SemanticException(SemanticAnalyzer.generateErrorMessage(expr, + ErrorMsg.INVALID_COLUMN + .getMsg(".* reference is not supported in the context"))); + RowResolver input = ctx.getInputRR(); for (ColumnInfo colInfo : input.getColumnInfos()) { if (!colInfo.getIsVirtualCol()) { @@ -1111,8 +1134,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // If any of the children contains null, then return a null // this is a hack for now to handle the group by case if (children.contains(null)) { - RowResolver input = ctx.getInputRR(); - List possibleColumnNames = input.getReferenceableColumnAliases(null, -1); + List possibleColumnNames = getReferenceableColumnAliases(ctx); String reason = String.format("(possible column names are: %s)", StringUtils.join(possibleColumnNames, ", ")); ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr.getChild(0), reason), @@ -1135,6 +1157,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } } + protected List getReferenceableColumnAliases(TypeCheckCtx ctx) { + return ctx.getInputRR().getReferenceableColumnAliases(null, -1); + } } /** @@ -1142,7 +1167,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * * @return DefaultExprProcessor. */ - public static DefaultExprProcessor getDefaultExprProcessor() { + public DefaultExprProcessor getDefaultExprProcessor() { return new DefaultExprProcessor(); } @@ -1160,13 +1185,18 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return null; } + ASTNode expr = (ASTNode) nd; + ASTNode sqNode = (ASTNode) expr.getParent().getChild(1); + + if (!ctx.getallowSubQueryExpr()) + throw new SemanticException(SemanticAnalyzer.generateErrorMessage(sqNode, + ErrorMsg.UNSUPPORTED_SUBQUERY_EXPRESSION.getMsg())); + ExprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); if (desc != null) { return desc; } - ASTNode expr = (ASTNode) nd; - ASTNode sqNode = (ASTNode) expr.getParent().getChild(1); /* * Restriction.1.h :: SubQueries only supported in the SQL Where Clause. */ @@ -1182,7 +1212,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * * @return DateExprProcessor. 
*/ - public static SubQueryExprProcessor getSubQueryExprProcessor() { + public SubQueryExprProcessor getSubQueryExprProcessor() { return new SubQueryExprProcessor(); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java index 82c8333..14ee3f0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java @@ -17,6 +17,13 @@ */ package org.apache.hadoop.hive.ql.parse; +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.Context; @@ -27,19 +34,12 @@ import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.InvalidTableException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.session.SessionState; -import java.io.IOException; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - - /** * A subclass of the {@link org.apache.hadoop.hive.ql.parse.SemanticAnalyzer} that just handles * update and delete statements. It works by rewriting the updates and deletes into insert @@ -128,11 +128,16 @@ private void reparseAndSuperAnalyze(ASTNode tree) throws SemanticException { Table mTable; try { mTable = db.getTable(tableName[0], tableName[1]); + } catch (InvalidTableException e) { + LOG.error("Failed to find table " + getDotName(tableName) + " got exception " + + e.getMessage()); + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(getDotName(tableName)), e); } catch (HiveException e) { - LOG.error("Failed to find table " + getDotName(tableName) + " got exception " + - e.getMessage()); - throw new SemanticException(ErrorMsg.INVALID_TABLE, getDotName(tableName)); + LOG.error("Failed to find table " + getDotName(tableName) + " got exception " + + e.getMessage()); + throw new SemanticException(e.getMessage(), e); } + List partCols = mTable.getPartCols(); rewrittenQueryStr.append("insert into table "); @@ -343,8 +348,10 @@ private void reparseAndSuperAnalyze(ASTNode tree) throws SemanticException { // Add the setRCols to the input list for (String colName : setRCols) { - columnAccessInfo.add(Table.getCompleteName(mTable.getDbName(), mTable.getTableName()), + if(columnAccessInfo != null) {//assuming this means we are not doing Auth + columnAccessInfo.add(Table.getCompleteName(mTable.getDbName(), mTable.getTableName()), colName); + } } } @@ -386,7 +393,7 @@ private void addSetRCols(ASTNode node, Set setRCols) { setRCols.add(colName.getText()); } else if (node.getChildren() != null) { for (Node n : node.getChildren()) { - addSetRCols(node, setRCols); + addSetRCols((ASTNode)n, setRCols); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java index 8517319..298bbca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java @@ -110,10 +110,12 @@ public AlterTableDesc() { * @param 
newComment * @param newType */ - public AlterTableDesc(String tblName, String oldColName, String newColName, + public AlterTableDesc(String tblName, HashMap partSpec, + String oldColName, String newColName, String newType, String newComment, boolean first, String afterCol) { super(); oldName = tblName; + this.partSpec = partSpec; this.oldColName = oldColName; this.newColName = newColName; newColType = newType; @@ -142,11 +144,12 @@ public AlterTableDesc(String oldName, String newName, boolean expectView) { * @param newCols * new columns to be added */ - public AlterTableDesc(String name, List newCols, + public AlterTableDesc(String name, HashMap partSpec, List newCols, AlterTableTypes alterType) { op = alterType; oldName = name; this.newCols = new ArrayList(newCols); + this.partSpec = partSpec; } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java index 3560442..05be1f1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/BaseWork.java @@ -41,6 +41,7 @@ // Their function is mainly as root ops to give the mapjoin the correct // schema info. List dummyOps; + int tag; public BaseWork() {} @@ -100,7 +101,7 @@ public void addDummyOp(HashTableDummyOperator dummyOp) { // add all children opStack.addAll(opSet); - + while(!opStack.empty()) { Operator op = opStack.pop(); returnSet.add(op); @@ -139,4 +140,12 @@ public boolean getVectorMode() { } public abstract void configureJobConf(JobConf job); + + public void setTag(int tag) { + this.tag = tag; + } + + public int getTag() { + return tag; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CommonMergeJoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CommonMergeJoinDesc.java new file mode 100644 index 0000000..b17de7f --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CommonMergeJoinDesc.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.Map; + +import org.apache.hadoop.hive.ql.exec.Operator; + +@Explain(displayName = "Merge Join Operator") +public class CommonMergeJoinDesc extends MapJoinDesc implements Serializable { + private static final long serialVersionUID = 1L; + private int numBuckets; + private boolean isSubQuery; + private int mapJoinConversionPos; + + CommonMergeJoinDesc() { + } + + public CommonMergeJoinDesc(int numBuckets, boolean isSubQuery, int mapJoinConversionPos, + MapJoinDesc joinDesc) { + super(joinDesc); + this.numBuckets = numBuckets; + this.isSubQuery = isSubQuery; + this.mapJoinConversionPos = mapJoinConversionPos; + } + + public boolean getCustomMerge() { + return isSubQuery; + } + + public int getNumBuckets() { + return numBuckets; + } + + public int getBigTablePosition() { + return mapJoinConversionPos; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java index ba30e1f..1e9b543 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java @@ -52,6 +52,7 @@ public int getPrefixLength() { ArrayList partSpecs; boolean expectView; boolean ifExists; + boolean ifPurge; boolean ignoreProtection; public DropTableDesc() { @@ -59,12 +60,14 @@ public DropTableDesc() { /** * @param tableName + * @param ifPurge */ - public DropTableDesc(String tableName, boolean expectView, boolean ifExists) { + public DropTableDesc(String tableName, boolean expectView, boolean ifExists, boolean ifPurge) { this.tableName = tableName; this.partSpecs = null; this.expectView = expectView; this.ifExists = ifExists; + this.ifPurge = ifPurge; this.ignoreProtection = false; } @@ -149,4 +152,19 @@ public boolean getIfExists() { public void setIfExists(boolean ifExists) { this.ifExists = ifExists; } + + /** + * @return whether Purge was specified + */ + public boolean getIfPurge() { + return ifPurge; + } + + /** + * @param ifPurge + * set whether Purge was specified + */ + public void setIfPurge(boolean ifPurge) { + this.ifPurge = ifPurge; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java index 3295aba..8a41577 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.BaseCharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; @@ -93,7 +94,7 @@ public String getExprString() { return "null"; } - if (typeInfo.getTypeName().equals(serdeConstants.STRING_TYPE_NAME)) { + if (typeInfo.getTypeName().equals(serdeConstants.STRING_TYPE_NAME) || typeInfo instanceof BaseCharTypeInfo) { return "'" + value.toString() + "'"; } else if (typeInfo.getTypeName().equals(serdeConstants.BINARY_TYPE_NAME)) { byte[] bytes = (byte[]) value; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java index f293c43..75372cd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDescUtils.java @@ -372,5 +372,42 @@ private static ExprNodeConstantDesc foldConstant(ExprNodeGenericFuncDesc func) { } catch (Exception e) { return null; } - } + } + + public static void getExprNodeColumnDesc(List exprDescList, + Map hashCodeTocolumnDescMap) { + for (ExprNodeDesc exprNodeDesc : exprDescList) { + getExprNodeColumnDesc(exprNodeDesc, hashCodeTocolumnDescMap); + } + } + + /** + * Get Map of ExprNodeColumnDesc HashCode to ExprNodeColumnDesc. + * + * @param exprDesc + * @param hashCodeTocolumnDescMap + * Assumption: If two ExprNodeColumnDesc have same hash code then + * they are logically referring to same projection + */ + public static void getExprNodeColumnDesc(ExprNodeDesc exprDesc, + Map hashCodeTocolumnDescMap) { + if (exprDesc instanceof ExprNodeColumnDesc) { + hashCodeTocolumnDescMap.put( + ((ExprNodeColumnDesc) exprDesc).hashCode(), + ((ExprNodeColumnDesc) exprDesc)); + } else if (exprDesc instanceof ExprNodeColumnListDesc) { + for (ExprNodeDesc child : ((ExprNodeColumnListDesc) exprDesc) + .getChildren()) { + getExprNodeColumnDesc(child, hashCodeTocolumnDescMap); + } + } else if (exprDesc instanceof ExprNodeGenericFuncDesc) { + for (ExprNodeDesc child : ((ExprNodeGenericFuncDesc) exprDesc) + .getChildren()) { + getExprNodeColumnDesc(child, hashCodeTocolumnDescMap); + } + } else if (exprDesc instanceof ExprNodeFieldDesc) { + getExprNodeColumnDesc(((ExprNodeFieldDesc) exprDesc).getDesc(), + hashCodeTocolumnDescMap); + } + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java index 57ab9de..d43bd60 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java @@ -69,6 +69,7 @@ // Hash table memory usage allowed; used in case of non-staged mapjoin. 
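
The getExprNodeColumnDesc change above walks an expression tree recursively and records every column reference it finds, keyed by hash code (with the patch's stated assumption that equal hash codes denote the same projection). The sketch below reproduces that walk over simplified stand-in node classes; ColumnExpr, FuncExpr, and CollectColumns are illustrative names, not Hive API.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Simplified stand-ins for Hive's ExprNodeDesc hierarchy (illustrative only).
    abstract class Expr {}

    class ColumnExpr extends Expr {
      final String name;
      ColumnExpr(String name) { this.name = name; }
      @Override public int hashCode() { return name.hashCode(); }
      @Override public boolean equals(Object o) {
        return o instanceof ColumnExpr && ((ColumnExpr) o).name.equals(name);
      }
    }

    class FuncExpr extends Expr {
      final List<Expr> children = new ArrayList<>();
      FuncExpr(Expr... kids) { for (Expr k : kids) children.add(k); }
    }

    public class CollectColumns {
      // Mirrors the recursive descent in getExprNodeColumnDesc: leaf column
      // nodes are recorded by hash code, composite nodes only recurse.
      static void collect(Expr e, Map<Integer, ColumnExpr> out) {
        if (e instanceof ColumnExpr) {
          out.put(e.hashCode(), (ColumnExpr) e);
        } else if (e instanceof FuncExpr) {
          for (Expr child : ((FuncExpr) e).children) {
            collect(child, out);
          }
        }
      }

      public static void main(String[] args) {
        // (a + b) * a  -- 'a' appears twice but collapses to one map entry
        Expr tree = new FuncExpr(new FuncExpr(new ColumnExpr("a"), new ColumnExpr("b")),
            new ColumnExpr("a"));
        Map<Integer, ColumnExpr> cols = new HashMap<>();
        collect(tree, cols);
        System.out.println(cols.size()); // prints 2
      }
    }

Because only leaves land in the map, duplicate references to the same column dedupe naturally, which is exactly what the hash-code-keyed map in the patch is for.
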
private float hashtableMemoryUsage; + protected boolean genJoinKeys = true; public MapJoinDesc() { bigTableBucketNumMapping = new LinkedHashMap(); @@ -122,6 +123,7 @@ private void initRetainExprList() { } } + @Explain(displayName = "input vertices") public Map getParentToInput() { return parentToInput; } @@ -331,4 +333,16 @@ public void setCustomBucketMapJoin(boolean customBucketMapJoin) { public boolean getCustomBucketMapJoin() { return this.customBucketMapJoin; } + + public boolean isMapSideJoin() { + return true; + } + + public void setGenJoinKeys(boolean genJoinKeys) { + this.genJoinKeys = genJoinKeys; + } + + public boolean getGenJoinKeys() { + return genJoinKeys; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java index 15a97ab..a808fc9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java @@ -30,6 +30,7 @@ import java.util.Map.Entry; import java.util.Set; +import com.google.common.collect.Interner; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; @@ -96,6 +97,7 @@ private Long minSplitSize; private Long minSplitSizePerNode; private Long minSplitSizePerRack; + private final int tag = 0; //use sampled partitioning private int samplingType; @@ -126,6 +128,8 @@ private Map> eventSourcePartKeyExprMap = new LinkedHashMap>(); + private boolean doSplitsGrouping = true; + public MapWork() {} public MapWork(String name) { @@ -195,6 +199,22 @@ public void deriveExplainAttributes() { } } + public void internTable(Interner interner) { + if (aliasToPartnInfo != null) { + for (PartitionDesc part : aliasToPartnInfo.values()) { + if (part == null) { + continue; + } + part.intern(interner); + } + } + if (pathToPartitionInfo != null) { + for (PartitionDesc part : pathToPartitionInfo.values()) { + part.intern(interner); + } + } + } + /** * @return the aliasToPartnInfo */ @@ -567,4 +587,12 @@ public void setEventSourceColumnNameMap(Map> map) { public void setEventSourcePartKeyExprMap(Map> map) { this.eventSourcePartKeyExprMap = map; } + + public void setDoSplitsGrouping(boolean doSplitsGrouping) { + this.doSplitsGrouping = doSplitsGrouping; + } + + public boolean getDoSplitsGrouping() { + return this.doSplitsGrouping; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java new file mode 100644 index 0000000..9e72ccc --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java @@ -0,0 +1,88 @@ +package org.apache.hadoop.hive.ql.plan; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.hive.ql.exec.CommonMergeJoinOperator; +import org.apache.hadoop.hive.ql.exec.HashTableDummyOperator; +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.mapred.JobConf; + +public class MergeJoinWork extends BaseWork { + + private CommonMergeJoinOperator mergeJoinOp = null; + private final List mergeWorkList = new ArrayList(); + private BaseWork bigTableWork; + + public MergeJoinWork() { + super(); + } + + @Override + public String getName() { + return super.getName(); + } + + @Override + public void replaceRoots(Map, Operator> replacementMap) { + getMainWork().replaceRoots(replacementMap); + } + + @Override + public Set> getAllRootOperators() { + return 
getMainWork().getAllRootOperators(); + } + + @Override + public void configureJobConf(JobConf job) { + } + + public CommonMergeJoinOperator getMergeJoinOperator() { + return this.mergeJoinOp; + } + + public void setMergeJoinOperator(CommonMergeJoinOperator mergeJoinOp) { + this.mergeJoinOp = mergeJoinOp; + } + + public void addMergedWork(BaseWork work, BaseWork connectWork) { + if (work != null) { + if ((bigTableWork != null) && (bigTableWork != work)) { + assert false; + } + this.bigTableWork = work; + setName(work.getName()); + } + + if (connectWork != null) { + this.mergeWorkList.add(connectWork); + } + } + + @Explain(skipHeader=true, displayName = "Join") + public List getBaseWorkList() { + return mergeWorkList; + } + + public String getBigTableAlias() { + return ((MapWork) bigTableWork).getAliasToWork().keySet().iterator().next(); + } + + @Explain(skipHeader=true, displayName = "Main") + public BaseWork getMainWork() { + return bigTableWork; + } + + @Override + public void setDummyOps(List dummyOps) { + getMainWork().setDummyOps(dummyOps); + } + + @Override + public void addDummyOp(HashTableDummyOperator dummyOp) { + getMainWork().addDummyOp(dummyOp); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java index 125ad21..c2b3664 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/OpTraits.java @@ -20,17 +20,16 @@ import java.util.List; -import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; - public class OpTraits { - + List> bucketColNames; + List> sortColNames; int numBuckets; - - public OpTraits(List> bucketColNames, int numBuckets) { + + public OpTraits(List> bucketColNames, int numBuckets, List> sortColNames) { this.bucketColNames = bucketColNames; this.numBuckets = numBuckets; + this.sortColNames = sortColNames; } public List> getBucketColNames() { @@ -42,10 +41,18 @@ public int getNumBuckets() { } public void setBucketColNames(List> bucketColNames) { - this.bucketColNames = bucketColNames; + this.bucketColNames = bucketColNames; } public void setNumBuckets(int numBuckets) { this.numBuckets = numBuckets; } + + public void setSortColNames(List> sortColNames) { + this.sortColNames = sortColNames; + } + + public List> getSortCols() { + return sortColNames; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java index 1149bda..10c38d9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java @@ -48,12 +48,10 @@ public class PartitionDesc implements Serializable, Cloneable { static { - TABLE_INTERNER = Interners.newWeakInterner(); STRING_INTERNER = Interners.newWeakInterner(); CLASS_INTERNER = Interners.newWeakInterner(); } - private static final Interner TABLE_INTERNER; private static final Interner STRING_INTERNER; private static final Interner> CLASS_INTERNER; @@ -73,12 +71,12 @@ public PartitionDesc() { } public PartitionDesc(final TableDesc table, final LinkedHashMap partSpec) { - setTableDesc(table); + this.tableDesc = table; this.partSpec = partSpec; } public PartitionDesc(final Partition part) throws HiveException { - setTableDesc(Utilities.getTableDesc(part.getTable())); + this.tableDesc = Utilities.getTableDesc(part.getTable()); setProperties(part.getMetadataFromPartitionSchema()); partSpec = 
part.getSpec(); setInputFileFormatClass(part.getInputFormatClass()); @@ -86,7 +84,7 @@ public PartitionDesc(final Partition part) throws HiveException { } public PartitionDesc(final Partition part,final TableDesc tblDesc) throws HiveException { - setTableDesc(tblDesc); + this.tableDesc = tblDesc; setProperties(part.getSchemaFromTableSchema(tblDesc.getProperties())); // each partition maintains a large properties partSpec = part.getSpec(); setOutputFileFormatClass(part.getInputFormatClass()); @@ -99,7 +97,7 @@ public TableDesc getTableDesc() { } public void setTableDesc(TableDesc tableDesc) { - this.tableDesc = TABLE_INTERNER.intern(tableDesc); + this.tableDesc = tableDesc; } @Explain(displayName = "partition values") @@ -266,4 +264,8 @@ public void deriveBaseFileName(String path) { baseFileName = path; } } + + public void intern(Interner interner) { + this.tableDesc = interner.intern(tableDesc); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java index 456b5eb..a03e373 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TezWork.java @@ -19,12 +19,13 @@ package org.apache.hadoop.hive.ql.plan; import java.io.Serializable; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedList; import java.util.LinkedHashMap; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -46,6 +47,22 @@ @Explain(displayName = "Tez") public class TezWork extends AbstractOperatorDesc { + public enum VertexType { + AUTO_INITIALIZED_EDGES, // no custom vertex or edge + INITIALIZED_EDGES, // custom vertex and custom edge but single MR Input + MULTI_INPUT_INITIALIZED_EDGES, // custom vertex, custom edge and multi MR Input + MULTI_INPUT_UNINITIALIZED_EDGES // custom vertex, no custom edge, multi MR Input + ; + + public static boolean isCustomInputType(VertexType vertex) { + if ((vertex == null) || (vertex == AUTO_INITIALIZED_EDGES)) { + return false; + } else { + return true; + } + } + } + private static transient final Log LOG = LogFactory.getLog(TezWork.class); private static int counter; @@ -56,6 +73,7 @@ private final Map> invertedWorkGraph = new HashMap>(); private final Map, TezEdgeProperty> edgeProperties = new HashMap, TezEdgeProperty>(); + private final Map workVertexTypeMap = new HashMap(); public TezWork(String name) { this.name = name + ":" + (++counter); @@ -305,15 +323,23 @@ public int compareTo(Dependency o) { work.configureJobConf(jobConf); } String[] newTmpJars = jobConf.getStrings(MR_JAR_PROPERTY); - if (oldTmpJars != null && (oldTmpJars.length != 0)) { - if (newTmpJars != null && (newTmpJars.length != 0)) { - String[] combinedTmpJars = new String[newTmpJars.length + oldTmpJars.length]; - System.arraycopy(oldTmpJars, 0, combinedTmpJars, 0, oldTmpJars.length); - System.arraycopy(newTmpJars, 0, combinedTmpJars, oldTmpJars.length, newTmpJars.length); - jobConf.setStrings(MR_JAR_PROPERTY, combinedTmpJars); + if (oldTmpJars != null || newTmpJars != null) { + String[] finalTmpJars; + if (oldTmpJars == null || oldTmpJars.length == 0) { + // Avoid a copy when oldTmpJars is null or empty + finalTmpJars = newTmpJars; + } else if (newTmpJars == null || newTmpJars.length == 0) { + // Avoid a copy when newTmpJars is null or empty + finalTmpJars = oldTmpJars; } else { - jobConf.setStrings(MR_JAR_PROPERTY, oldTmpJars); + // 
Both are non-empty, only copy now + finalTmpJars = new String[oldTmpJars.length + newTmpJars.length]; + System.arraycopy(oldTmpJars, 0, finalTmpJars, 0, oldTmpJars.length); + System.arraycopy(newTmpJars, 0, finalTmpJars, oldTmpJars.length, newTmpJars.length); } + + jobConf.setStrings(MR_JAR_PROPERTY, finalTmpJars); + return finalTmpJars; } return newTmpJars; } @@ -332,4 +358,40 @@ public void connect(BaseWork a, BaseWork b, ImmutablePair workPair = new ImmutablePair(a, b); edgeProperties.put(workPair, edgeProp); } + + public void setVertexType(BaseWork w, VertexType incomingVertexType) { + VertexType vertexType = workVertexTypeMap.get(w); + if (vertexType == null) { + vertexType = VertexType.AUTO_INITIALIZED_EDGES; + } + switch (vertexType) { + case INITIALIZED_EDGES: + if (incomingVertexType == VertexType.MULTI_INPUT_UNINITIALIZED_EDGES) { + vertexType = VertexType.MULTI_INPUT_INITIALIZED_EDGES; + } + break; + + case MULTI_INPUT_INITIALIZED_EDGES: + // nothing to do + break; + + case MULTI_INPUT_UNINITIALIZED_EDGES: + if (incomingVertexType == VertexType.INITIALIZED_EDGES) { + vertexType = VertexType.MULTI_INPUT_INITIALIZED_EDGES; + } + break; + + case AUTO_INITIALIZED_EDGES: + vertexType = incomingVertexType; + break; + + default: + break; + } + workVertexTypeMap.put(w, vertexType); + } + + public VertexType getVertexType(BaseWork w) { + return workVertexTypeMap.get(w); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java index 930285e..fc9d0bd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java @@ -23,7 +23,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.classification.InterfaceAudience.Private; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStorePreEventListener; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; @@ -40,6 +42,8 @@ import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent; import org.apache.hadoop.hive.metastore.events.PreDropTableEvent; import org.apache.hadoop.hive.metastore.events.PreEventContext; +import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.PreReadTableEvent; import org.apache.hadoop.hive.ql.metadata.AuthorizationException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveUtils; @@ -54,6 +58,7 @@ * metastore PreEventContexts, such as the adding/dropping and altering * of databases, tables and partitions. 
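
The setVertexType method added to TezWork above folds each incoming edge type into whatever was already recorded for the vertex: an absent or AUTO entry adopts the incoming type, and INITIALIZED_EDGES meeting MULTI_INPUT_UNINITIALIZED_EDGES (in either order) promotes to MULTI_INPUT_INITIALIZED_EDGES. Below is a minimal standalone restatement of that merge table with spot checks; VertexTypeMergeDemo and merge are illustrative names, and the switch is condensed rather than copied from the patch.

    public class VertexTypeMergeDemo {
      enum VertexType {
        AUTO_INITIALIZED_EDGES,          // no custom vertex or edge
        INITIALIZED_EDGES,               // custom vertex and edge, single MR input
        MULTI_INPUT_INITIALIZED_EDGES,   // custom vertex and edge, multiple MR inputs
        MULTI_INPUT_UNINITIALIZED_EDGES  // custom vertex, no custom edge, multiple MR inputs
      }

      // Condensed restatement of TezWork.setVertexType: null/AUTO adopts the
      // incoming type; INITIALIZED_EDGES combined with
      // MULTI_INPUT_UNINITIALIZED_EDGES (either order) promotes to
      // MULTI_INPUT_INITIALIZED_EDGES; everything else stays as-is.
      static VertexType merge(VertexType current, VertexType incoming) {
        if (current == null || current == VertexType.AUTO_INITIALIZED_EDGES) {
          return incoming;
        }
        boolean promote =
            (current == VertexType.INITIALIZED_EDGES
                && incoming == VertexType.MULTI_INPUT_UNINITIALIZED_EDGES)
            || (current == VertexType.MULTI_INPUT_UNINITIALIZED_EDGES
                && incoming == VertexType.INITIALIZED_EDGES);
        return promote ? VertexType.MULTI_INPUT_INITIALIZED_EDGES : current;
      }

      public static void main(String[] args) {
        System.out.println(merge(null, VertexType.INITIALIZED_EDGES));
        // INITIALIZED_EDGES
        System.out.println(merge(VertexType.INITIALIZED_EDGES,
            VertexType.MULTI_INPUT_UNINITIALIZED_EDGES));
        // MULTI_INPUT_INITIALIZED_EDGES
      }
    }
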
*/ +@Private public class AuthorizationPreEventListener extends MetaStorePreEventListener { public static final Log LOG = LogFactory.getLog( @@ -136,6 +141,12 @@ public void onEvent(PreEventContext context) throws MetaException, NoSuchObjectE case ALTER_TABLE: authorizeAlterTable((PreAlterTableEvent)context); break; + case READ_TABLE: + authorizeReadTable((PreReadTableEvent)context); + break; + case READ_DATABASE: + authorizeReadDatabase((PreReadDatabaseEvent)context); + break; case ADD_PARTITION: authorizeAddPartition((PreAddPartitionEvent)context); break; @@ -162,6 +173,44 @@ public void onEvent(PreEventContext context) throws MetaException, NoSuchObjectE } + private void authorizeReadTable(PreReadTableEvent context) throws InvalidOperationException, + MetaException { + if (!isReadAuthzEnabled()) { + return; + } + try { + org.apache.hadoop.hive.ql.metadata.Table wrappedTable = new TableWrapper(context.getTable()); + for (HiveMetastoreAuthorizationProvider authorizer : tAuthorizers.get()) { + authorizer.authorize(wrappedTable, new Privilege[] { Privilege.SELECT }, null); + } + } catch (AuthorizationException e) { + throw invalidOperationException(e); + } catch (HiveException e) { + throw metaException(e); + } + } + + private void authorizeReadDatabase(PreReadDatabaseEvent context) + throws InvalidOperationException, MetaException { + if (!isReadAuthzEnabled()) { + return; + } + try { + for (HiveMetastoreAuthorizationProvider authorizer : tAuthorizers.get()) { + authorizer.authorize(new Database(context.getDatabase()), + new Privilege[] { Privilege.SELECT }, null); + } + } catch (AuthorizationException e) { + throw invalidOperationException(e); + } catch (HiveException e) { + throw metaException(e); + } + } + + private boolean isReadAuthzEnabled() { + return tConfig.get().getBoolean(ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname, true); + } + private void authorizeAuthorizationAPICall() throws InvalidOperationException, MetaException { for (HiveMetastoreAuthorizationProvider authorizer : tAuthorizers.get()) { try { @@ -358,7 +407,7 @@ public PartitionWrapper(org.apache.hadoop.hive.ql.metadata.Table table, public PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart, PreEventContext context) throws HiveException, NoSuchObjectException, MetaException { org.apache.hadoop.hive.metastore.api.Partition wrapperApiPart = mapiPart.deepCopy(); - org.apache.hadoop.hive.metastore.api.Table t = context.getHandler().get_table( + org.apache.hadoop.hive.metastore.api.Table t = context.getHandler().get_table_core( mapiPart.getDbName(), mapiPart.getTableName()); if (wrapperApiPart.getSd() == null){ // In the cases of create partition, by the time this event fires, the partition diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java index 18a1b25..25c25da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveAuthorizationProviderBase.java @@ -85,7 +85,7 @@ public Database getDatabase(String dbName) throws HiveException { return hiveClient.getDatabase(dbName); } else { try { - return handler.get_database(dbName); + return handler.get_database_core(dbName); } catch (NoSuchObjectException e) { throw new HiveException(e); } catch (MetaException e) { diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessControlException.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessControlException.java index f6f7e4a..d877686 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessControlException.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessControlException.java @@ -27,7 +27,7 @@ * an error while performing authorization, and not a authorization being * denied. */ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public class HiveAccessControlException extends HiveException{ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessController.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessController.java index ede408b..a18eedc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessController.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAccessController.java @@ -19,8 +19,7 @@ import java.util.List; -import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; -import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving; +import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.hive.conf.HiveConf; /** @@ -28,8 +27,7 @@ * create/drop roles, and commands to read the state of authorization rules. * Methods here have corresponding methods in HiveAuthorizer, check method documentation there. */ -@LimitedPrivate(value = { "" }) -@Evolving +@Private public interface HiveAccessController { void grantPrivileges(List hivePrincipals, List hivePrivileges, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizationValidator.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizationValidator.java index c2282df..5a5b3d5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizationValidator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizationValidator.java @@ -19,15 +19,13 @@ import java.util.List; -import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; -import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving; +import org.apache.hadoop.classification.InterfaceAudience.Private; /** * Interface used to check if user has privileges to perform certain action. * Methods here have corresponding methods in HiveAuthorizer, check method documentation there. */ -@LimitedPrivate(value = { "" }) -@Evolving +@Private public interface HiveAuthorizationValidator { /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java index 911a943..9c3a95c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizer.java @@ -34,7 +34,7 @@ * statements and does not make assumptions about the privileges needed for a hive operation. * This is referred to as V2 authorizer in other parts of the code. 
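
The new authorizeReadTable and authorizeReadDatabase handlers above share one shape: return early when metastore read authorization is switched off (the patch reads HIVE_METASTORE_AUTHORIZATION_AUTH_READS with a default of true), otherwise require every configured provider to grant SELECT, converting denials into exceptions. A minimal sketch of that gate-then-delegate pattern follows; Authorizer and ReadAuthzDemo are illustrative stand-ins, not Hive classes.

    import java.util.Arrays;
    import java.util.List;

    public class ReadAuthzDemo {
      // Illustrative stand-in for a metastore authorization provider.
      interface Authorizer {
        void authorizeSelect(String objectName) throws SecurityException;
      }

      private final boolean readAuthzEnabled;   // mirrors isReadAuthzEnabled()
      private final List<Authorizer> authorizers;

      ReadAuthzDemo(boolean enabled, List<Authorizer> authorizers) {
        this.readAuthzEnabled = enabled;
        this.authorizers = authorizers;
      }

      // Same shape as authorizeReadTable/authorizeReadDatabase: early return
      // when the feature is off, otherwise every provider must pass.
      void onReadEvent(String objectName) {
        if (!readAuthzEnabled) {
          return;
        }
        for (Authorizer a : authorizers) {
          a.authorizeSelect(objectName); // throws SecurityException on denial
        }
      }

      public static void main(String[] args) {
        Authorizer allow = name -> { /* no denial */ };
        new ReadAuthzDemo(true, Arrays.asList(allow)).onReadEvent("db1.t1");
        System.out.println("read of db1.t1 authorized");
      }
    }
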
*/ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public interface HiveAuthorizer { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerFactory.java index 27fc128..e9628c0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthorizerFactory.java @@ -27,7 +27,7 @@ * create {@link HiveAuthorizer} instance used for hive authorization. * */ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public interface HiveAuthorizerFactory { /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzContext.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzContext.java index 248b626..195e341 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzContext.java @@ -25,7 +25,7 @@ * auditing and/or authorization. * It is an immutable class. Builder inner class is used instantiate it. */ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public final class HiveAuthzContext { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzPluginException.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzPluginException.java index 7e99930..80b7edc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzPluginException.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzPluginException.java @@ -27,7 +27,7 @@ * an error while performing authorization, and not a authorization being * denied. */ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public class HiveAuthzPluginException extends HiveException{ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzSessionContext.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzSessionContext.java index b0aadd5..3d18dfd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzSessionContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveAuthzSessionContext.java @@ -24,7 +24,7 @@ * Provides session context information. * It is an immutable class. Builder inner class is used instantiate it. */ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public final class HiveAuthzSessionContext { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveMetastoreClientFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveMetastoreClientFactory.java index 8ccd72b..6078a18 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveMetastoreClientFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveMetastoreClientFactory.java @@ -28,7 +28,7 @@ * But Hive class is not a public interface, so this factory helps in hiding Hive * class from the authorization interface users. 
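
The annotation edits repeated across these plugin interfaces all replace an empty @LimitedPrivate audience with a named consumer, Apache Argus (incubating), documenting who the compatibility promise of this SPI is made to. For readers unfamiliar with Hadoop-style audience annotations, here is a minimal re-creation of the mechanism; the LimitedPrivate and PluggableAuthorizer types below are illustrative stand-ins, not the real Hadoop or Hive classes.

    import java.lang.annotation.Documented;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;

    public class AudienceDemo {
      // Minimal audience-scoping annotation in the style of
      // InterfaceAudience.LimitedPrivate (illustrative, not the real one).
      @Documented
      @Retention(RetentionPolicy.RUNTIME)
      @interface LimitedPrivate {
        String[] value(); // the named consumers the interface is maintained for
      }

      // Naming a concrete consumer, as the patch does for the authorization
      // SPI, records who may rely on this otherwise-private interface.
      @LimitedPrivate({ "Apache Argus (incubating)" })
      interface PluggableAuthorizer {
        boolean allowed(String user, String action, String object);
      }

      public static void main(String[] args) {
        LimitedPrivate aud =
            PluggableAuthorizer.class.getAnnotation(LimitedPrivate.class);
        System.out.println(String.join(", ", aud.value()));
      }
    }
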
*/ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public interface HiveMetastoreClientFactory { IMetaStoreClient getHiveMetastoreClient() throws HiveAuthzPluginException; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java index c4469a5..b79c080 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java @@ -23,7 +23,7 @@ /** * List of hive operations types. */ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public enum HiveOperationType { EXPLAIN, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrincipal.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrincipal.java index d8f530b..c5f4c40 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrincipal.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrincipal.java @@ -23,7 +23,7 @@ /** * Represents the user or role in grant/revoke statements */ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public class HivePrincipal implements Comparable { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilege.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilege.java index 5e64b8d..3f69fc7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilege.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilege.java @@ -27,7 +27,7 @@ /** * Represents the hive privilege being granted/revoked */ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public class HivePrivilege implements Comparable { @Override @@ -97,6 +97,7 @@ public boolean supportsScope(PrivilegeScope scope) { return supportedScope != null && supportedScope.contains(scope.name()); } + @Override public int compareTo(HivePrivilege privilege) { int compare = columns != null ? (privilege.columns != null ? 
compare(columns, privilege.columns) : 1) : diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeInfo.java index 0f91ccb..37cda95 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeInfo.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeInfo.java @@ -23,7 +23,7 @@ /** * Represents a privilege granted for an object to a principal */ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public class HivePrivilegeInfo{ private final HivePrincipal principal; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java index 01d9cb6..0364627 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HivePrivilegeObject.java @@ -22,14 +22,19 @@ import java.util.Iterator; import java.util.List; +import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; -import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable; /** - * Represents the object on which privilege is being granted/revoked + * Represents the object on which privilege is being granted/revoked, and objects + * being used in queries. + * + * Check the get* function documentation for information on what value it returns based on + * the {@link HivePrivilegeObjectType}. + * */ -@LimitedPrivate(value = { "" }) -@Unstable +@LimitedPrivate(value = { "Apache Argus (incubating)" }) +@Evolving public class HivePrivilegeObject implements Comparable { @Override @@ -77,9 +82,20 @@ private int compare(Collection o1, Collection o2) { return o1.size() > o2.size() ? 1 : (o1.size() < o2.size() ? -1 : 0); } + /** + * Note that GLOBAL, PARTITION, COLUMN fields are populated only for Hive's old default + * authorization mode. + * When the authorization manager is an instance of HiveAuthorizerFactory, these types are not + * used. + */ public enum HivePrivilegeObjectType { GLOBAL, DATABASE, TABLE_OR_VIEW, PARTITION, COLUMN, LOCAL_URI, DFS_URI, COMMAND_PARAMS, FUNCTION - } ; + }; + + /** + * When {@link HiveOperationType} is QUERY, this action type is set so that it is possible + * to determine if the action type on this object is an INSERT or INSERT_OVERWRITE + */ public enum HivePrivObjectActionType { OTHER, INSERT, INSERT_OVERWRITE, UPDATE, DELETE }; @@ -139,6 +155,9 @@ public HivePrivilegeObjectType getType() { return type; } + /** + * @return the db name if type is DATABASE, TABLE, or FUNCTION + */ public String getDbname() { return dbname; } @@ -150,6 +169,10 @@ public String getObjectName() { return objectName; } + /** + * See javadoc of {@link HivePrivObjectActionType} + * @return action type + */ public HivePrivObjectActionType getActionType() { return actionType; } @@ -158,12 +181,15 @@ public HivePrivObjectActionType getActionType() { return commandParams; } + /** + * @return partition key information. Used only for old default authorization mode.
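
The compareTo implementations in HivePrivilege and HivePrivilegeObject order possibly-null fields with nested ternaries: a null field sorts before a non-null one, and two non-null fields fall through to a content comparison (the hunk context above ends mid-expression; the nulls-first reading is the conventional completion of that pattern). The same logic as a reusable helper, a sketch rather than the Hive code:

    import java.util.Comparator;

    public class NullsFirstDemo {
      // Equivalent of the nested ternary in compareTo: a null field sorts
      // before a non-null one; two non-nulls defer to the comparator.
      static <T> int compareNullsFirst(T a, T b, Comparator<T> cmp) {
        if (a == null) {
          return b == null ? 0 : -1;
        }
        return b == null ? 1 : cmp.compare(a, b);
      }

      public static void main(String[] args) {
        System.out.println(compareNullsFirst(null, "x", Comparator.<String>naturalOrder())); // -1
        System.out.println(compareNullsFirst("x", null, Comparator.<String>naturalOrder())); // 1
        System.out.println(compareNullsFirst("a", "b", Comparator.<String>naturalOrder()));  // negative
      }
    }

Since Java 8, java.util.Comparator.nullsFirst provides the same semantics out of the box.
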
+ */ public List getPartKeys() { return partKeys; } /** - * Applicable columns in this object + * Applicable columns in this object, when the type is {@link HivePrivilegeObjectType.TABLE} * In case of DML read operations, this is the set of columns being used. * Column information is not set for DDL operations and for tables being written into * @return list of applicable columns diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveRoleGrant.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveRoleGrant.java index eb3bd8e..7c5546b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveRoleGrant.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveRoleGrant.java @@ -26,7 +26,7 @@ /** * Represents a grant of a role to a principal */ -@LimitedPrivate(value = { "" }) +@LimitedPrivate(value = { "Apache Argus (incubating)" }) @Evolving public class HiveRoleGrant implements Comparable { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index 3a14828..3b6bb5c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -1249,7 +1249,7 @@ public void close() throws IOException { try { if (tezSessionState != null) { - TezSessionPoolManager.getInstance().close(tezSessionState); + TezSessionPoolManager.getInstance().close(tezSessionState, false); } } catch (Exception e) { LOG.info("Error closing tez session", e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index b51f7a8..5143fbe 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -18,8 +18,15 @@ package org.apache.hadoop.hive.ql.stats; -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; +import java.math.BigDecimal; +import java.math.BigInteger; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; @@ -80,19 +87,14 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableTimestampObjectInspector; import org.apache.hadoop.io.BytesWritable; -import java.math.BigDecimal; -import java.math.BigInteger; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import com.google.common.base.Joiner; +import com.google.common.collect.Lists; public class StatsUtils { private static final Log LOG = LogFactory.getLog(StatsUtils.class.getName()); + /** * Collect table, partition and column level statistics * @param conf @@ -109,15 +111,34 @@ public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList, Table table, TableScanOperator tableScanOperator) throws HiveException { - Statistics stats = new Statistics(); - // column level statistics are required only for the columns that are needed List schema = tableScanOperator.getSchema().getSignature(); List neededColumns = tableScanOperator.getNeededColumns(); + List referencedColumns = tableScanOperator.getReferencedColumns(); + + return 
collectStatistics(conf, partList, table, schema, neededColumns, referencedColumns); + } + + private static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList, + Table table, List schema, List neededColumns, + List referencedColumns) throws HiveException { + boolean fetchColStats = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_FETCH_COLUMN_STATS); boolean fetchPartStats = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_FETCH_PARTITION_STATS); + + return collectStatistics(conf, partList, table, schema, neededColumns, referencedColumns, + fetchColStats, fetchPartStats); + } + + public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList partList, + Table table, List schema, List neededColumns, + List referencedColumns, boolean fetchColStats, boolean fetchPartStats) + throws HiveException { + + Statistics stats = new Statistics(); + float deserFactor = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_STATS_DESERIALIZATION_FACTOR); @@ -207,7 +228,6 @@ public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList pa stats.getBasicStatsState().equals(State.COMPLETE)) { stats.setBasicStatsState(State.PARTIAL); } - boolean haveFullStats = fetchColStats; if (fetchColStats) { List partNames = new ArrayList(partList.getNotDeniedPartns().size()); for (Partition part : partList.getNotDeniedPartns()) { @@ -215,37 +235,84 @@ public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList pa } Map colToTabAlias = new HashMap(); neededColumns = processNeededColumns(schema, neededColumns, colToTabAlias); - AggrStats aggrStats = Hive.get().getAggrColStatsFor(table.getDbName(), table.getTableName(), neededColumns, partNames); + AggrStats aggrStats = Hive.get().getAggrColStatsFor(table.getDbName(), table.getTableName(), + neededColumns, partNames); if (null == aggrStats) { - haveFullStats = false; + // There are some partitions with no state (or we didn't fetch any state). + // Update the stats with empty list to reflect that in the + // state/initialize structures. + List emptyStats = Lists.newArrayList(); + + // add partition column stats + addParitionColumnStats(neededColumns, referencedColumns, schema, table, partList, + emptyStats); + + stats.addToColumnStats(emptyStats); + stats.updateColumnStatsState(deriveStatType(emptyStats, referencedColumns)); } else { List colStats = aggrStats.getColStats(); if (colStats.size() != neededColumns.size()) { - LOG.debug("Column stats requested for : " + neededColumns.size() + " columns. Able to retrieve" - + " for " + colStats.size() + " columns"); + LOG.debug("Column stats requested for : " + neededColumns.size() + " columns. Able to" + + " retrieve for " + colStats.size() + " columns"); } - List columnStats = convertColStats(colStats, table.getTableName(), colToTabAlias); + List columnStats = convertColStats(colStats, table.getTableName(), + colToTabAlias); + + addParitionColumnStats(neededColumns, referencedColumns, schema, table, partList, + columnStats); + stats.addToColumnStats(columnStats); - State colState = deriveStatType(columnStats, neededColumns); + State colState = deriveStatType(columnStats, referencedColumns); if (aggrStats.getPartsFound() != partNames.size() && colState != State.NONE) { - LOG.debug("Column stats requested for : " + partNames.size() +" partitions. " - + "Able to retrieve for " + aggrStats.getPartsFound() + " partitions"); + LOG.debug("Column stats requested for : " + partNames.size() + " partitions. 
" + + "Able to retrieve for " + aggrStats.getPartsFound() + " partitions"); colState = State.PARTIAL; } stats.setColumnStatsState(colState); } } - // There are some partitions with no state (or we didn't fetch any state). - // Update the stats with empty list to reflect that in the state/initialize structures. - if (!haveFullStats) { - List emptyStats = Lists.newArrayList(); - stats.addToColumnStats(emptyStats); - stats.updateColumnStatsState(deriveStatType(emptyStats, neededColumns)); - } } return stats; } + private static void addParitionColumnStats(List neededColumns, + List referencedColumns, List schema, Table table, + PrunedPartitionList partList, List colStats) + throws HiveException { + + // extra columns is difference between referenced columns vs needed + // columns. The difference could be partition columns. + List extraCols = Lists.newArrayList(referencedColumns); + if (referencedColumns.size() > neededColumns.size()) { + extraCols.removeAll(neededColumns); + for (String col : extraCols) { + for (ColumnInfo ci : schema) { + // conditions for being partition column + if (col.equals(ci.getInternalName()) && ci.getIsVirtualCol() && + !ci.isHiddenVirtualCol()) { + // currently metastore does not store column stats for + // partition column, so we calculate the NDV from pruned + // partition list + ColStatistics partCS = new ColStatistics(table.getTableName(), + ci.getInternalName(), ci.getType().getTypeName()); + long numPartitions = getNDVPartitionColumn(partList.getPartitions(), + ci.getInternalName()); + partCS.setCountDistint(numPartitions); + colStats.add(partCS); + } + } + } + } + } + + public static int getNDVPartitionColumn(Set partitions, String partColName) { + Set distinctVals = new HashSet(partitions.size()); + for (Partition partition : partitions) { + distinctVals.add(partition.getSpec().get(partColName)); + } + return distinctVals.size(); + } + private static void setUnknownRcDsToAverage( List rowCounts, List dataSizes, int avgRowSize) { if (LOG.isDebugEnabled()) { @@ -751,7 +818,8 @@ public static long getAvgColLenOfFixedLengthTypes(String colType) { || colType.equalsIgnoreCase(serdeConstants.FLOAT_TYPE_NAME)) { return JavaDataModel.get().primitive1(); } else if (colType.equalsIgnoreCase(serdeConstants.DOUBLE_TYPE_NAME) - || colType.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME)) { + || colType.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME) + || colType.equalsIgnoreCase("long")) { return JavaDataModel.get().primitive2(); } else if (colType.equalsIgnoreCase(serdeConstants.TIMESTAMP_TYPE_NAME)) { return JavaDataModel.get().lengthOfTimestamp(); @@ -780,7 +848,8 @@ public static long getSizeOfPrimitiveTypeArraysFromType(String colType, int leng return JavaDataModel.get().lengthForIntArrayOfSize(length); } else if (colType.equalsIgnoreCase(serdeConstants.DOUBLE_TYPE_NAME)) { return JavaDataModel.get().lengthForDoubleArrayOfSize(length); - } else if (colType.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME)) { + } else if (colType.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME) + || colType.equalsIgnoreCase("long")) { return JavaDataModel.get().lengthForLongArrayOfSize(length); } else if (colType.equalsIgnoreCase(serdeConstants.BINARY_TYPE_NAME)) { return JavaDataModel.get().lengthForByteArrayOfSize(length); @@ -876,7 +945,7 @@ public static long getWritableSize(ObjectInspector oi, Object value) { Statistics parentStats, Map colExprMap, RowSchema rowSchema) { List cs = Lists.newArrayList(); - if (colExprMap != null) { + if (colExprMap != null && rowSchema != null) 
{ for (ColumnInfo ci : rowSchema.getSignature()) { String outColName = ci.getInternalName(); outColName = StatsUtils.stripPrefixFromColumnName(outColName); @@ -1042,10 +1111,8 @@ public static long getTotalSize(Table table) { /** * Get basic stats of table - * @param dbName - * - database name - * @param tabName - * - table name + * @param table + * - table * @param statType * - type of stats * @return value of stats diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index 4b0009f..c1d0fe1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -76,7 +76,7 @@ public void run() { // don't doom the entire thread. try { ShowCompactResponse currentCompactions = txnHandler.showCompact(new ShowCompactRequest()); - ValidTxnList txns = TxnHandler.createValidTxnList(txnHandler.getOpenTxns()); + ValidTxnList txns = TxnHandler.createValidTxnList(txnHandler.getOpenTxns(), 0); Set potentials = txnHandler.findPotentialCompactions(abortedThreshold); LOG.debug("Found " + potentials.size() + " potential compactions, " + "checking to see if we should compact any of them"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 347bf65..249fece 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -120,7 +120,7 @@ public void run() { final boolean isMajor = ci.isMajorCompaction(); final ValidTxnList txns = - TxnHandler.createValidTxnList(txnHandler.getOpenTxns()); + TxnHandler.createValidTxnList(txnHandler.getOpenTxns(), 0); final StringBuffer jobName = new StringBuffer(name); jobName.append("-compactor-"); jobName.append(ci.getFullPartitionName()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog.java index cf25bfe..31e2878 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog.java @@ -52,29 +52,6 @@ public DoubleWritable evaluate(DoubleWritable base, DoubleWritable a) { } /** - * Get the logarithm of the given decimal with the given base. - */ - public DoubleWritable evaluate(DoubleWritable base, HiveDecimalWritable writable) { - if (base == null || writable == null) { - return null; - } - double d = writable.getHiveDecimal().bigDecimalValue().doubleValue(); - return log(base.get(), d); - } - - /** - * Get the logarithm of input with the given decimal as the base. - */ - public DoubleWritable evaluate(HiveDecimalWritable base, DoubleWritable d) { - if (base == null || d == null) { - return null; - } - - double b = base.getHiveDecimal().bigDecimalValue().doubleValue(); - return log(b, d.get()); - } - - /** * Get the logarithm of the given decimal input with the given decimal base. 
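
Returning to the StatsUtils change: getNDVPartitionColumn derives a partition column's distinct-value count from the pruned partition list itself, because, as the patch's comment notes, the metastore does not store column statistics for partition columns. The computation is just collecting that key's value from every partition spec into a set, as the sketch below shows with plain maps standing in for Hive's Partition objects (PartitionNdvDemo is an illustrative name).

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class PartitionNdvDemo {
      // Mirrors getNDVPartitionColumn: one set entry per distinct value of
      // the partition key across the pruned partition list.
      static int ndv(List<Map<String, String>> partitionSpecs, String partCol) {
        Set<String> distinct = new HashSet<>();
        for (Map<String, String> spec : partitionSpecs) {
          distinct.add(spec.get(partCol));
        }
        return distinct.size();
      }

      public static void main(String[] args) {
        Map<String, String> p1 = new HashMap<>();
        p1.put("ds", "2014-01-01"); p1.put("hr", "00");
        Map<String, String> p2 = new HashMap<>();
        p2.put("ds", "2014-01-01"); p2.put("hr", "01");
        Map<String, String> p3 = new HashMap<>();
        p3.put("ds", "2014-01-02"); p3.put("hr", "00");
        List<Map<String, String>> parts = Arrays.asList(p1, p2, p3);
        System.out.println(ndv(parts, "ds")); // 2
        System.out.println(ndv(parts, "hr")); // 2
      }
    }
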
*/ public DoubleWritable evaluate(HiveDecimalWritable baseWritable, HiveDecimalWritable writable) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java index 6131d3d..8399f26 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNumeric.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; @@ -44,6 +45,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils.PrimitiveGrouping; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.HiveDecimalUtils; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; @@ -318,4 +320,17 @@ public boolean isAnsiSqlArithmetic() { public void setAnsiSqlArithmetic(boolean ansiSqlArithmetic) { this.ansiSqlArithmetic = ansiSqlArithmetic; } + + public PrimitiveTypeInfo deriveMinArgumentCast( + ExprNodeDesc childExpr, TypeInfo targetType) { + assert targetType instanceof PrimitiveTypeInfo : "Not a primitive type" + targetType; + PrimitiveTypeInfo pti = (PrimitiveTypeInfo)targetType; + // We only do the minimum cast for decimals. Other types are assumed safe; fix if needed. + // We also don't do anything for non-primitive children (maybe we should assert). + if ((pti.getPrimitiveCategory() != PrimitiveCategory.DECIMAL) + || (!(childExpr.getTypeInfo() instanceof PrimitiveTypeInfo))) return pti; + PrimitiveTypeInfo childTi = (PrimitiveTypeInfo)childExpr.getTypeInfo(); + // If the child is also decimal, no cast is needed (we hope - can target type be narrower?). 
+ return HiveDecimalUtils.getDecimalTypeForPrimitiveCategory(childTi); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java index a0aeccf..f76fc10 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFromUtcTimestamp.java @@ -22,6 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -33,7 +34,9 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.io.Text; - +@Description(name = "from_utc_timestamp", + value = "from_utc_timestamp(timestamp, string timezone) - " + + "Assumes given timestamp is UTC and converts to given timezone (as of Hive 0.8.0)") public class GenericUDFFromUtcTimestamp extends GenericUDF { static final Log LOG = LogFactory.getLog(GenericUDFFromUtcTimestamp.class); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java index 45bf05a..7009026 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIf.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; @@ -62,6 +63,11 @@ * otherwise it returns expr3. IF() returns a numeric or string value, depending * on the context in which it is used. */ +@Description( + name = "if", + value = "IF(expr1,expr2,expr3) - If expr1 is TRUE (expr1 <> 0 and expr1 <> NULL) then" + + " IF() returns expr2; otherwise it returns expr3.
IF() returns a numeric or string value," + + " depending on the context in which it is used.") @VectorizedExpressions({ IfExprLongColumnLongColumn.class, IfExprDoubleColumnDoubleColumn.class, IfExprLongColumnLongScalar.class, IfExprDoubleColumnDoubleScalar.class, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java index ba4fed7..2f854f4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTimestamp.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.vector.VectorizedExpressions; @@ -39,6 +40,8 @@ * Creates a TimestampWritable object using PrimitiveObjectInspectorConverter * */ +@Description(name = "timestamp", +value = "cast(date as timestamp) - Returns timestamp") @VectorizedExpressions({CastLongToTimestampViaLongToLong.class, CastDoubleToTimestampViaDoubleToLong.class, CastDecimalToTimestamp.class}) public class GenericUDFTimestamp extends GenericUDF { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java index af4da3a..4234346 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToUtcTimestamp.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; +@Description(name = "to_utc_timestamp", + value = "to_utc_timestamp(timestamp, string timezone) - " + + "Assumes given timestamp is in given timezone and converts to UTC (as of Hive 0.8.0)") public class GenericUDFToUtcTimestamp extends GenericUDFFromUtcTimestamp { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java index 640a9f9..36b4a43 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java @@ -331,7 +331,8 @@ public void testMapOperator() throws Throwable { Configuration hconf = new JobConf(TestOperators.class); HiveConf.setVar(hconf, HiveConf.ConfVars.HADOOPMAPFILENAME, "hdfs:///testDir/testFile"); - IOContext.get().setInputPath(new Path("hdfs:///testDir/testFile")); + IOContext.get(hconf).setInputPath( + new Path("hdfs:///testDir/testFile")); // initialize pathToAliases ArrayList aliases = new ArrayList(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java index ad5a6e7..c6ac557 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezSessionPool.java @@ -26,6 +26,7 @@ import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; import org.apache.hadoop.hive.conf.HiveConf; public class TestTezSessionPool { @@ -157,4 +158,29 @@ public void testReturn() { } } } + + @Test + public void testCloseAndOpenDefault() throws Exception { + poolManager = new TestTezSessionPoolManager(); + TezSessionState session = 
Mockito.mock(TezSessionState.class); + Mockito.when(session.isDefault()).thenReturn(false); + + poolManager.closeAndOpen(session, conf, false); + + Mockito.verify(session).close(false); + Mockito.verify(session).open(conf, null); + } + + @Test + public void testCloseAndOpenWithResources() throws Exception { + poolManager = new TestTezSessionPoolManager(); + TezSessionState session = Mockito.mock(TezSessionState.class); + Mockito.when(session.isDefault()).thenReturn(false); + String[] extraResources = new String[] { "file:///tmp/foo.jar" }; + + poolManager.closeAndOpen(session, conf, extraResources, false); + + Mockito.verify(session).close(false); + Mockito.verify(session).open(conf, extraResources); + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java index 45ab672..d004a27 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java @@ -30,9 +30,11 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; -import java.util.LinkedList; import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -48,6 +50,7 @@ import org.apache.hadoop.hive.ql.plan.TezEdgeProperty; import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType; import org.apache.hadoop.hive.ql.plan.TezWork; +import org.apache.hadoop.hive.ql.plan.TezWork.VertexType; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.mapred.JobConf; @@ -90,8 +93,11 @@ public void setUp() throws Exception { path = mock(Path.class); when(path.getFileSystem(any(Configuration.class))).thenReturn(fs); when(utils.getTezDir(any(Path.class))).thenReturn(path); - when(utils.createVertex(any(JobConf.class), any(BaseWork.class), any(Path.class), any(LocalResource.class), - any(List.class), any(FileSystem.class), any(Context.class), anyBoolean(), any(TezWork.class))).thenAnswer(new Answer() { + when( + utils.createVertex(any(JobConf.class), any(BaseWork.class), any(Path.class), + any(LocalResource.class), any(List.class), any(FileSystem.class), any(Context.class), + anyBoolean(), any(TezWork.class), any(VertexType.class))).thenAnswer( + new Answer() { @Override public Vertex answer(InvocationOnMock invocation) throws Throwable { @@ -101,8 +107,8 @@ public Vertex answer(InvocationOnMock invocation) throws Throwable { } }); - when(utils.createEdge(any(JobConf.class), any(Vertex.class), - any(Vertex.class), any(TezEdgeProperty.class))).thenAnswer(new Answer() { + when(utils.createEdge(any(JobConf.class), any(Vertex.class), any(Vertex.class), + any(TezEdgeProperty.class), any(VertexType.class))).thenAnswer(new Answer() { @Override public Edge answer(InvocationOnMock invocation) throws Throwable { @@ -204,10 +210,11 @@ public void testEmptyWork() throws IllegalArgumentException, IOException, Except @Test public void testSubmit() throws Exception { DAG dag = DAG.create("test"); - task.submit(conf, dag, path, appLr, sessionState, new LinkedList()); + task.submit(conf, dag, path, appLr, sessionState, Collections. emptyList(), + new String[0], Collections. 
emptyMap()); // validate close/reopen - verify(sessionState, times(1)).open(any(HiveConf.class)); - verify(sessionState, times(1)).close(eq(false)); // now uses pool after HIVE-7043 + verify(sessionState, times(1)).open(any(HiveConf.class), any(String[].class)); + verify(sessionState, times(1)).close(eq(true)); // now uses pool after HIVE-7043 verify(session, times(2)).submitDAG(any(DAG.class)); } @@ -216,4 +223,54 @@ public void testClose() throws HiveException { task.close(work, 0); verify(op, times(4)).jobClose(any(Configuration.class), eq(true)); } + + @Test + public void testExistingSessionGetsStorageHandlerResources() throws Exception { + final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"}; + LocalResource res = mock(LocalResource.class); + final List resources = Collections.singletonList(res); + final Map resMap = new HashMap(); + resMap.put("foo.jar", res); + + when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars)) + .thenReturn(resources); + when(utils.getBaseName(res)).thenReturn("foo.jar"); + when(sessionState.isOpen()).thenReturn(true); + when(sessionState.hasResources(inputOutputJars)).thenReturn(false); + task.updateSession(sessionState, conf, path, inputOutputJars, resMap); + verify(session).addAppMasterLocalFiles(resMap); + } + + @Test + public void testExtraResourcesAddedToDag() throws Exception { + final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"}; + LocalResource res = mock(LocalResource.class); + final List resources = Collections.singletonList(res); + final Map resMap = new HashMap(); + resMap.put("foo.jar", res); + DAG dag = mock(DAG.class); + + when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars)) + .thenReturn(resources); + when(utils.getBaseName(res)).thenReturn("foo.jar"); + when(sessionState.isOpen()).thenReturn(true); + when(sessionState.hasResources(inputOutputJars)).thenReturn(false); + task.addExtraResourcesToDag(sessionState, dag, inputOutputJars, resMap); + verify(dag).addTaskLocalFiles(resMap); + } + + @Test + public void testGetExtraLocalResources() throws Exception { + final String[] inputOutputJars = new String[] {"file:///tmp/foo.jar"}; + LocalResource res = mock(LocalResource.class); + final List resources = Collections.singletonList(res); + final Map resMap = new HashMap(); + resMap.put("foo.jar", res); + + when(utils.localizeTempFiles(path.toString(), conf, inputOutputJars)) + .thenReturn(resources); + when(utils.getBaseName(res)).thenReturn("foo.jar"); + + assertEquals(resMap, task.getExtraLocalResources(conf, path, inputOutputJars)); + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java b/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java index 19fdeb5..298ac26 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java @@ -90,7 +90,9 @@ * includes both native Hive storage formats as well as those enumerated in the * ADDITIONAL_STORAGE_FORMATS table. * - * @return List of storage format as paramters. + * @return List of storage format as a Collection of Object arrays, each containing (in order): + * Storage format name, SerDe class name, InputFormat class name, OutputFormat class name. + * This list is used as the parameters to JUnit parameterized tests. */ public static Collection asParameters() { List parameters = new ArrayList(); @@ -130,5 +132,21 @@ return parameters; } + + /** + * Returns a list of the names of storage formats. 
+ * + * @return List of names of storage formats. + */ + public static Collection names() { + List names = new ArrayList(); + for (StorageFormatDescriptor descriptor : ServiceLoader.load(StorageFormatDescriptor.class)) { + String[] formatNames = new String[descriptor.getNames().size()]; + formatNames = descriptor.getNames().toArray(formatNames); + String[] params = { formatNames[0] }; + names.add(params); + } + return names; + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java index 292a835..f8afb84 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java @@ -115,7 +115,8 @@ public void doClose() throws IOException { } private void resetIOContext() { - ioContext = IOContext.get(); + conf.set(Utilities.INPUT_NAME, "TestHiveBinarySearchRecordReader"); + ioContext = IOContext.get(conf); ioContext.setUseSorted(false); ioContext.setIsBinarySearching(false); ioContext.setEndBinarySearch(false); @@ -124,6 +125,7 @@ private void resetIOContext() { } private void init() throws IOException { + conf = new JobConf(); resetIOContext(); rcfReader = mock(RCFileRecordReader.class); when(rcfReader.next((LongWritable)anyObject(), @@ -131,7 +133,6 @@ private void init() throws IOException { // Since the start is 0, and the length is 100, the first call to sync should be with the value // 50 so return that for getPos() when(rcfReader.getPos()).thenReturn(50L); - conf = new JobConf(); conf.setBoolean("hive.input.format.sorted", true); TableDesc tblDesc = Utilities.defaultTd; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java index 56b0a3d..7f0d12a 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestSymlinkTextInputFormat.java @@ -165,7 +165,7 @@ public void testCombine() throws Exception { + " failed with exit code= " + ecode); } - String cmd = "select key from " + tblName; + String cmd = "select key*1 from " + tblName; drv.compile(cmd); //create scratch dir diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 5fedb62..55392c9 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -1633,7 +1633,7 @@ public void testCombinationInputFormatWithAcid() throws Exception { assertEquals("mock:/combinationAcid/p=1/00000" + bucket + "_0", combineSplit.getPath(bucket).toString()); assertEquals(0, combineSplit.getOffset(bucket)); - assertEquals(227, combineSplit.getLength(bucket)); + assertEquals(225, combineSplit.getLength(bucket)); } String[] hosts = combineSplit.getLocations(); assertEquals(2, hosts.length); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java index 6d6f132..0f606a4 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestNewIntegerEncoding.java @@ -335,6 +335,104 @@ public void testBasicDelta4() throws Exception { } @Test + public void testDeltaOverflow() throws 
Exception { + ObjectInspector inspector; + synchronized (TestOrcFile.class) { + inspector = ObjectInspectorFactory + .getReflectionObjectInspector(Long.class, + ObjectInspectorFactory.ObjectInspectorOptions.JAVA); + } + + long[] inp = new long[]{4513343538618202719l, 4513343538618202711l, + 2911390882471569739l, + -9181829309989854913l}; + List input = Lists.newArrayList(Longs.asList(inp)); + + Writer writer = OrcFile.createWriter( + testFilePath, + OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000) + .compress(CompressionKind.NONE).bufferSize(10000)); + for (Long l : input) { + writer.addRow(l); + } + writer.close(); + + Reader reader = OrcFile + .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs)); + RecordReader rows = reader.rows(); + int idx = 0; + while (rows.hasNext()) { + Object row = rows.next(null); + assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get()); + } + } + + @Test + public void testDeltaOverflow2() throws Exception { + ObjectInspector inspector; + synchronized (TestOrcFile.class) { + inspector = ObjectInspectorFactory + .getReflectionObjectInspector(Long.class, + ObjectInspectorFactory.ObjectInspectorOptions.JAVA); + } + + long[] inp = new long[]{Long.MAX_VALUE, 4513343538618202711l, + 2911390882471569739l, + Long.MIN_VALUE}; + List input = Lists.newArrayList(Longs.asList(inp)); + + Writer writer = OrcFile.createWriter( + testFilePath, + OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000) + .compress(CompressionKind.NONE).bufferSize(10000)); + for (Long l : input) { + writer.addRow(l); + } + writer.close(); + + Reader reader = OrcFile + .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs)); + RecordReader rows = reader.rows(); + int idx = 0; + while (rows.hasNext()) { + Object row = rows.next(null); + assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get()); + } + } + + @Test + public void testDeltaOverflow3() throws Exception { + ObjectInspector inspector; + synchronized (TestOrcFile.class) { + inspector = ObjectInspectorFactory + .getReflectionObjectInspector(Long.class, + ObjectInspectorFactory.ObjectInspectorOptions.JAVA); + } + + long[] inp = new long[]{-4513343538618202711l, -2911390882471569739l, -2, + Long.MAX_VALUE}; + List input = Lists.newArrayList(Longs.asList(inp)); + + Writer writer = OrcFile.createWriter( + testFilePath, + OrcFile.writerOptions(conf).inspector(inspector).stripeSize(100000) + .compress(CompressionKind.NONE).bufferSize(10000)); + for (Long l : input) { + writer.addRow(l); + } + writer.close(); + + Reader reader = OrcFile + .createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs)); + RecordReader rows = reader.rows(); + int idx = 0; + while (rows.hasNext()) { + Object row = rows.next(null); + assertEquals(input.get(idx++).longValue(), ((LongWritable) row).get()); + } + } + + @Test public void testIntegerMin() throws Exception { ObjectInspector inspector; synchronized (TestOrcFile.class) { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java index b8b92b7..7aeaf9f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcFile.java @@ -1754,9 +1754,9 @@ public void testMemoryManagementV12() throws Exception { stripe.getDataLength() < 5000); } // with HIVE-7832, the dictionaries will be disabled after writing the first - // stripe as there are too many distinct values. 
Hence only 3 stripes as
+ // stripe as there are too many distinct values. Hence only 4 stripes as
- assertEquals(3, i);
+ assertEquals(4, i);
 assertEquals(2500, reader.getNumberOfRows());
 }
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java
index 4a49f09..b3f9cf1 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestSerializationUtils.java
@@ -17,15 +17,18 @@
 */
 package org.apache.hadoop.hive.ql.io.orc;
-import org.junit.Test;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.InputStream;
 import java.math.BigInteger;
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
+import org.junit.Test;
+
+import com.google.common.math.LongMath;
 public class TestSerializationUtils {
@@ -112,6 +115,47 @@ public void testBigIntegers() throws Exception {
 SerializationUtils.readBigInteger(fromBuffer(buffer)));
 }
+ @Test
+ public void testSubtractionOverflow() {
+ // cross check results with Guava results below
+ SerializationUtils utils = new SerializationUtils();
+ assertEquals(false, utils.isSafeSubtract(22222222222L, Long.MIN_VALUE));
+ assertEquals(false, utils.isSafeSubtract(-22222222222L, Long.MAX_VALUE));
+ assertEquals(false, utils.isSafeSubtract(Long.MIN_VALUE, Long.MAX_VALUE));
+ assertEquals(true, utils.isSafeSubtract(-1553103058346370095L, 6553103058346370095L));
+ assertEquals(true, utils.isSafeSubtract(0, Long.MAX_VALUE));
+ assertEquals(true, utils.isSafeSubtract(Long.MIN_VALUE, 0));
+ }
+
+ @Test
+ public void testSubtractionOverflowGuava() {
+ try {
+ LongMath.checkedSubtract(22222222222L, Long.MIN_VALUE);
+ fail("expected ArithmeticException for overflow");
+ } catch (ArithmeticException ex) {
+ assertEquals("overflow", ex.getMessage());
+ }
+
+ try {
+ LongMath.checkedSubtract(-22222222222L, Long.MAX_VALUE);
+ fail("expected ArithmeticException for overflow");
+ } catch (ArithmeticException ex) {
+ assertEquals("overflow", ex.getMessage());
+ }
+
+ try {
+ LongMath.checkedSubtract(Long.MIN_VALUE, Long.MAX_VALUE);
+ fail("expected ArithmeticException for overflow");
+ } catch (ArithmeticException ex) {
+ assertEquals("overflow", ex.getMessage());
+ }
+
+ assertEquals(-8106206116692740190L,
+ LongMath.checkedSubtract(-1553103058346370095L, 6553103058346370095L));
+ assertEquals(-Long.MAX_VALUE, LongMath.checkedSubtract(0, Long.MAX_VALUE));
+ assertEquals(Long.MIN_VALUE, LongMath.checkedSubtract(Long.MIN_VALUE, 0));
+ }
+
 public static void main(String[] args) throws Exception {
 TestSerializationUtils test = new TestSerializationUtils();
 test.testDoubles();
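
The new tests above pin down the overflow rule the ORC delta encoder relies on: a long subtraction a - b is only usable when it does not wrap around the 64-bit range, and SerializationUtils.isSafeSubtract() must agree with Guava's LongMath.checkedSubtract() on that question. The patch does not include SerializationUtils itself, so what follows is only a sketch of a check consistent with those assertions (the classic two's-complement sign test), not necessarily the real implementation; the class name SafeSubtractSketch is invented for illustration.

// A minimal sketch, assuming only the behavior asserted in
// testSubtractionOverflow(): a - b overflows exactly when a and b have
// different signs and the wrapped result's sign differs from the sign of a.
public final class SafeSubtractSketch {
  static boolean isSafeSubtract(long a, long b) {
    long r = a - b;                  // wraps silently on overflow in Java
    return ((a ^ b) & (a ^ r)) >= 0; // negative iff an overflow occurred
  }

  public static void main(String[] args) {
    // mirrors the vectors from testSubtractionOverflow(); run with java -ea
    assert !isSafeSubtract(22222222222L, Long.MIN_VALUE);
    assert !isSafeSubtract(-22222222222L, Long.MAX_VALUE);
    assert !isSafeSubtract(Long.MIN_VALUE, Long.MAX_VALUE);
    assert isSafeSubtract(-1553103058346370095L, 6553103058346370095L);
    assert isSafeSubtract(0L, Long.MAX_VALUE);
    assert isSafeSubtract(Long.MIN_VALUE, 0L);
    System.out.println("all subtraction checks passed");
  }
}
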
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
index 406aae9..153908c 100755
--- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
@@ -21,14 +21,18 @@ import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.regex.Pattern;
 import junit.framework.TestCase;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
@@ -45,6 +49,7 @@ import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer;
 import org.apache.hadoop.hive.serde2.thrift.test.Complex;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
 import org.apache.hadoop.mapred.TextInputFormat;
@@ -63,6 +68,9 @@ protected void setUp() throws Exception {
 super.setUp();
 hiveConf = new HiveConf(this.getClass());
+ // enable trash so it can be tested
+ hiveConf.setFloat("fs.trash.checkpoint.interval", 30); // FS_TRASH_CHECKPOINT_INTERVAL_KEY (hadoop-2)
+ hiveConf.setFloat("fs.trash.interval", 30); // FS_TRASH_INTERVAL_KEY (hadoop-2)
 SessionState.start(hiveConf);
 try {
 hm = Hive.get(hiveConf);
@@ -79,6 +87,9 @@ protected void tearDown() throws Exception {
 try {
 super.tearDown();
+ // disable trash
+ hiveConf.setFloat("fs.trash.checkpoint.interval", 0); // FS_TRASH_CHECKPOINT_INTERVAL_KEY (hadoop-2)
+ hiveConf.setFloat("fs.trash.interval", 0); // FS_TRASH_INTERVAL_KEY (hadoop-2); 0 actually disables trash
 Hive.closeCurrent();
 } catch (Exception e) {
 System.err.println(StringUtils.stringifyException(e));
@@ -294,7 +305,7 @@ public void testGetAndDropTables() throws Throwable {
 try {
 String dbName = "db_for_testgettables";
 String table1Name = "table1";
- hm.dropDatabase(dbName, true, true);
+ hm.dropDatabase(dbName, true, true, true);
 Database db = new Database();
 db.setName(dbName);
@@ -330,16 +341,92 @@ public void testGetAndDropTables() throws Throwable {
 // Drop all tables
 for (String tableName : hm.getAllTables(dbName)) {
+ Table table = hm.getTable(dbName, tableName);
 hm.dropTable(dbName, tableName);
+ assertFalse(fs.exists(table.getPath()));
 }
 hm.dropDatabase(dbName);
 } catch (Throwable e) {
 System.err.println(StringUtils.stringifyException(e));
- System.err.println("testGetTables() failed");
+ System.err.println("testGetAndDropTables() failed");
 throw e;
 }
 }
+ public void testDropTableTrash() throws Throwable {
+ if (!ShimLoader.getHadoopShims().supportTrashFeature()) {
+ return; // it's hadoop-1
+ }
+ try {
+ String dbName = "db_for_testdroptable";
+ hm.dropDatabase(dbName, true, true, true);
+
+ Database db = new Database();
+ db.setName(dbName);
+ hm.createDatabase(db);
+
+ List ts = new ArrayList(2);
+ String tableBaseName = "droptable";
+ ts.add(tableBaseName + "1");
+ ts.add(tableBaseName + "2");
+ Table tbl1 = createTestTable(dbName, ts.get(0));
+ hm.createTable(tbl1);
+ Table tbl2 = createTestTable(dbName, ts.get(1));
+ hm.createTable(tbl2);
+ // test dropping tables and trash behavior
+ Table table1 = hm.getTable(dbName, ts.get(0));
+ assertNotNull(table1);
+ assertEquals(ts.get(0), table1.getTableName());
+ Path path1 = table1.getPath();
+ FileSystem fs = path1.getFileSystem(hiveConf);
+ assertTrue(fs.exists(path1));
+ // drop table and check that trash works
+ Path trashDir = ShimLoader.getHadoopShims().getCurrentTrashPath(hiveConf, fs);
+ assertNotNull("trash directory should not be null", trashDir);
+ Path trash1 = mergePaths(trashDir, path1);
+ Path pathglob = trash1.suffix("*");
+ FileStatus before[] =
fs.globStatus(pathglob); + hm.dropTable(dbName, ts.get(0)); + assertFalse(fs.exists(path1)); + FileStatus after[] = fs.globStatus(pathglob); + assertTrue("trash dir before and after DROP TABLE noPURGE are not different", + before.length != after.length); + + // drop a table without saving to trash by setting the purge option + Table table2 = hm.getTable(dbName, ts.get(1)); + assertNotNull(table2); + assertEquals(ts.get(1), table2.getTableName()); + Path path2 = table2.getPath(); + assertTrue(fs.exists(path2)); + Path trash2 = mergePaths(trashDir, path2); + System.out.println("trashDir2 is " + trash2); + pathglob = trash2.suffix("*"); + before = fs.globStatus(pathglob); + hm.dropTable(dbName, ts.get(1), true, true, true); // deleteData, ignoreUnknownTable, ifPurge + assertFalse(fs.exists(path2)); + after = fs.globStatus(pathglob); + Arrays.sort(before); + Arrays.sort(after); + assertEquals("trash dir before and after DROP TABLE PURGE are different", + before.length, after.length); + assertTrue("trash dir before and after DROP TABLE PURGE are different", + Arrays.equals(before, after)); + + // Drop all tables + for (String tableName : hm.getAllTables(dbName)) { + Table table = hm.getTable(dbName, tableName); + hm.dropTable(dbName, tableName); + assertFalse(fs.exists(table.getPath())); + } + hm.dropDatabase(dbName); + } catch (Throwable e) { + System.err.println(StringUtils.stringifyException(e)); + System.err.println("testDropTableTrash() failed"); + throw e; + } + } + + public void testPartition() throws Throwable { try { String tableName = "table_for_testpartition"; @@ -533,4 +620,39 @@ public void testHiveRefreshOnConfChange() throws Throwable{ newHiveObj = Hive.get(newHconf); assertTrue(prevHiveObj != newHiveObj); } + + // shamelessly copied from Path in hadoop-2 + private static final String SEPARATOR = "/"; + private static final char SEPARATOR_CHAR = '/'; + + private static final String CUR_DIR = "."; + + private static final boolean WINDOWS + = System.getProperty("os.name").startsWith("Windows"); + + private static final Pattern hasDriveLetterSpecifier = + Pattern.compile("^/?[a-zA-Z]:"); + + private static Path mergePaths(Path path1, Path path2) { + String path2Str = path2.toUri().getPath(); + path2Str = path2Str.substring(startPositionWithoutWindowsDrive(path2Str)); + // Add path components explicitly, because simply concatenating two path + // string is not safe, for example: + // "/" + "/foo" yields "//foo", which will be parsed as authority in Path + return new Path(path1.toUri().getScheme(), + path1.toUri().getAuthority(), + path1.toUri().getPath() + path2Str); + } + + private static int startPositionWithoutWindowsDrive(String path) { + if (hasWindowsDrive(path)) { + return path.charAt(0) == SEPARATOR_CHAR ? 3 : 2; + } else { + return 0; + } + } + + private static boolean hasWindowsDrive(String path) { + return (WINDOWS && hasDriveLetterSpecifier.matcher(path).find()); + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java index 1a5ba87..ee95e16 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHiveRemote.java @@ -84,6 +84,13 @@ protected void setUp() throws Exception { } /** + * Cannot control trash in remote metastore, so skip this test + */ + @Override + public void testDropTableTrash() { + } + + /** * Finds a free port. 
* * @return a free port diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java index 548215a..056c56d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java @@ -24,6 +24,9 @@ import org.junit.BeforeClass; import org.junit.Test; +/** + * various Parser tests for INSERT/UPDATE/DELETE + */ public class TestIUD { private static HiveConf conf; @@ -102,6 +105,18 @@ public void testUpdateWithWhereSingleSet() throws ParseException { ast.toStringTree()); } @Test + public void testUpdateWithWhereSingleSetExpr() throws ParseException { + ASTNode ast = parse("UPDATE src SET key = -3+(5*9)%8, val = cast(6.1 + c as INT), d = d - 1 WHERE value IS NULL"); + Assert.assertEquals("AST doesn't match", + "(TOK_UPDATE_TABLE (TOK_TABNAME src) " + + "(TOK_SET_COLUMNS_CLAUSE " + + "(= (TOK_TABLE_OR_COL key) (+ (- 3) (% (* 5 9) 8))) " + + "(= (TOK_TABLE_OR_COL val) (TOK_FUNCTION TOK_INT (+ 6.1 (TOK_TABLE_OR_COL c)))) " + + "(= (TOK_TABLE_OR_COL d) (- (TOK_TABLE_OR_COL d) 1))) " + + "(TOK_WHERE (TOK_FUNCTION TOK_ISNULL (TOK_TABLE_OR_COL value))))", + ast.toStringTree()); + } + @Test public void testUpdateWithWhereMultiSet() throws ParseException { ASTNode ast = parse("UPDATE src SET key = 3, value = 8 WHERE VALUE = 1230997"); Assert.assertEquals("AST doesn't match", @@ -207,13 +222,13 @@ public void testInsertIntoTableFromAnonymousTable1Row() throws ParseException { } @Test public void testInsertIntoTableFromAnonymousTable() throws ParseException { - ASTNode ast = parse("insert into table page_view values(1,2),(3,4)"); + ASTNode ast = parse("insert into table page_view values(-1,2),(3,+4)"); Assert.assertEquals("AST doesn't match", "(TOK_QUERY " + "(TOK_FROM " + "(TOK_VIRTUAL_TABLE " + "(TOK_VIRTUAL_TABREF TOK_ANONYMOUS) " + - "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2) (TOK_VALUE_ROW 3 4)))) " + + "(TOK_VALUES_TABLE (TOK_VALUE_ROW (- 1) 2) (TOK_VALUE_ROW 3 (+ 4))))) " + "(TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME page_view))) " + "(TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))", ast.toStringTree()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java index 301b531..87ef193 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java @@ -266,9 +266,12 @@ private ReturnInfo parseAndAnalyze(String query, String testName) // I have to create the tables here (rather than in setup()) because I need the Hive // connection, which is conviently created by the semantic analyzer. 
- db.createTable("T", Arrays.asList("a", "b"), null, OrcInputFormat.class, OrcOutputFormat.class); + Map params = new HashMap(1); + params.put(SemanticAnalyzer.ACID_TABLE_PROPERTY, "true"); + db.createTable("T", Arrays.asList("a", "b"), null, OrcInputFormat.class, + OrcOutputFormat.class, 2, Arrays.asList("a"), params); db.createTable("U", Arrays.asList("a", "b"), Arrays.asList("ds"), OrcInputFormat.class, - OrcOutputFormat.class); + OrcOutputFormat.class, 2, Arrays.asList("a"), params); Table u = db.getTable("U"); Map partVals = new HashMap(2); partVals.put("ds", "yesterday"); @@ -280,7 +283,7 @@ private ReturnInfo parseAndAnalyze(String query, String testName) // validate the plan sem.validate(); - QueryPlan plan = new QueryPlan(query, sem, 0L, testName); + QueryPlan plan = new QueryPlan(query, sem, 0L, testName, null); return new ReturnInfo(tree, sem, plan); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java index 2600149..2344279 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestTezWork.java @@ -23,11 +23,16 @@ import junit.framework.Assert; import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType; +import org.apache.hadoop.mapred.JobConf; import org.junit.Before; import org.junit.Test; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; public class TestTezWork { + private static final String MR_JAR_PROPERTY = "tmpjars"; private List nodes; private TezWork work; @@ -156,4 +161,75 @@ public void testGetAllWork() throws Exception { Assert.assertEquals(sorted.get(i), nodes.get(4-i)); } } + + @Test + public void testConfigureJars() throws Exception { + final JobConf conf = new JobConf(); + conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar"); + BaseWork baseWork = Mockito.mock(BaseWork.class); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + conf.set(MR_JAR_PROPERTY, "file:///tmp/foo2.jar"); + return null; + } + + }).when(baseWork).configureJobConf(conf); + + work.add(baseWork); + work.configureJobConfAndExtractJars(conf); + Assert.assertEquals("file:///tmp/foo1.jar,file:///tmp/foo2.jar", conf.get(MR_JAR_PROPERTY)); + } + + @Test + public void testConfigureJarsNoExtraJars() throws Exception { + final JobConf conf = new JobConf(); + conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar"); + BaseWork baseWork = Mockito.mock(BaseWork.class); + + work.add(baseWork); + work.configureJobConfAndExtractJars(conf); + Assert.assertEquals("file:///tmp/foo1.jar", conf.get(MR_JAR_PROPERTY)); + } + + @Test + public void testConfigureJarsWithNull() throws Exception { + final JobConf conf = new JobConf(); + conf.set(MR_JAR_PROPERTY, "file:///tmp/foo1.jar"); + BaseWork baseWork = Mockito.mock(BaseWork.class); + Mockito.doAnswer(new Answer() { + + @Override + public Void answer(InvocationOnMock invocation) throws Throwable { + conf.unset(MR_JAR_PROPERTY); + return null; + } + + }).when(baseWork).configureJobConf(conf); + + work.add(baseWork); + work.configureJobConfAndExtractJars(conf); + Assert.assertEquals("file:///tmp/foo1.jar", conf.get(MR_JAR_PROPERTY)); + } + + @Test + public void testConfigureJarsStartingWithNull() throws Exception { + final JobConf conf = new JobConf(); + conf.unset(MR_JAR_PROPERTY); + BaseWork baseWork = Mockito.mock(BaseWork.class); + Mockito.doAnswer(new Answer() { + + @Override + public Void 
answer(InvocationOnMock invocation) throws Throwable { + conf.setStrings(MR_JAR_PROPERTY, "file:///tmp/foo1.jar", "file:///tmp/foo2.jar"); + return null; + } + + }).when(baseWork).configureJobConf(conf); + + work.add(baseWork); + work.configureJobConfAndExtractJars(conf); + Assert.assertEquals("file:///tmp/foo1.jar,file:///tmp/foo2.jar", conf.get(MR_JAR_PROPERTY)); + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java index 8cf0452..98da397 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/TestUDFMath.java @@ -98,14 +98,6 @@ public void testLog() throws HiveException { input = createDecimal("7.38905609893065"); DoubleWritable res = udf.evaluate(input); Assert.assertEquals(2.0, res.get(), 0.000001); - - DoubleWritable input = new DoubleWritable(9.0); - res = udf.evaluate(createDecimal("3.0"), input); - Assert.assertEquals(2.0, res.get(), 0.000001); - - DoubleWritable base = new DoubleWritable(3.0); - res = udf.evaluate(base, createDecimal("9.0")); - Assert.assertEquals(2.0, res.get(), 0.000001); res = udf.evaluate(createDecimal("3.0"), createDecimal("9.0")); Assert.assertEquals(2.0, res.get(), 0.000001); diff --git a/ql/src/test/queries/clientnegative/acid_overwrite.q b/ql/src/test/queries/clientnegative/acid_overwrite.q index 9f6c1f8..2e57a3c 100644 --- a/ql/src/test/queries/clientnegative/acid_overwrite.q +++ b/ql/src/test/queries/clientnegative/acid_overwrite.q @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_uanp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10; insert overwrite table acid_uanp select cint, cast(cstring1 as varchar(128)) from alltypesorc; diff --git a/ql/src/test/queries/clientnegative/alter_partition_change_col_dup_col.q b/ql/src/test/queries/clientnegative/alter_partition_change_col_dup_col.q new file mode 100644 index 0000000..8f5a884 --- /dev/null +++ b/ql/src/test/queries/clientnegative/alter_partition_change_col_dup_col.q @@ -0,0 +1,4 @@ +create table alter_partition_change_col_dup_col (c1 string, c2 decimal(10,0)) partitioned by (p1 string); +alter table alter_partition_change_col_dup_col add partition (p1='abc'); +-- should fail because of duplicate name c1 +alter table alter_partition_change_col_dup_col change c2 c1 decimal(14,4); diff --git a/ql/src/test/queries/clientnegative/alter_partition_change_col_nonexist.q b/ql/src/test/queries/clientnegative/alter_partition_change_col_nonexist.q new file mode 100644 index 0000000..97348d9 --- /dev/null +++ b/ql/src/test/queries/clientnegative/alter_partition_change_col_nonexist.q @@ -0,0 +1,5 @@ +create table alter_partition_change_col_nonexist (c1 string, c2 decimal(10,0)) partitioned by (p1 string); +alter table alter_partition_change_col_nonexist add partition (p1='abc'); +-- should fail because of nonexistent column c3 +alter table alter_partition_change_col_nonexist change c3 c4 decimal(14,4); + diff --git a/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q 
b/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
index 090495a..f2de306 100644
--- a/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
+++ b/ql/src/test/queries/clientnegative/authorization_delete_nodeletepriv.q
@@ -5,12 +5,11 @@ set hive.security.authorization.enabled=true;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 -- check update without update priv
-create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc;;
+create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 set user.name=user1;
 delete from auth_nodel where i > 0;
diff --git a/ql/src/test/queries/clientnegative/authorization_not_owner_drop_tab2.q b/ql/src/test/queries/clientnegative/authorization_not_owner_drop_tab2.q
new file mode 100644
index 0000000..c4ef868
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/authorization_not_owner_drop_tab2.q
@@ -0,0 +1,14 @@
+set hive.test.authz.sstd.hs2.mode=true;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+set user.name=user1;
+
+create database db1;
+use db1;
+-- create table as user1; dropping it as a different user should fail. use db.table syntax
+create table t1(i int);
+use default;
+
+set user.name=user2;
+drop table db1.t1;
diff --git a/ql/src/test/queries/clientnegative/authorization_sba_drop_table.q b/ql/src/test/queries/clientnegative/authorization_sba_drop_table.q
new file mode 100644
index 0000000..6fac025
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/authorization_sba_drop_table.q
@@ -0,0 +1,9 @@
+set hive.metastore.pre.event.listeners=org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener;
+set hive.security.metastore.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider;
+
+dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/a_sba_droptab1;
+
+create table t1(i int) location '${system:test.tmp.dir}/a_sba_droptab1';
+dfs -chmod 555 ${system:test.tmp.dir}/a_sba_droptab1;
+-- Attempt to drop table without having write permissions on table dir should result in error
+drop table t1;
diff --git a/ql/src/test/queries/clientnegative/authorization_show_columns.q b/ql/src/test/queries/clientnegative/authorization_show_columns.q
new file mode 100644
index 0000000..a6597af
--- /dev/null
+++ b/ql/src/test/queries/clientnegative/authorization_show_columns.q
@@ -0,0 +1,13 @@
+set hive.test.authz.sstd.hs2.mode=true;
+set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest;
+set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
+set hive.security.authorization.enabled=true;
+
+create database db1;
+use db1;
+-- check show columns without select privilege fails
+create table t1(i int);
+
+set user.name=user1;
+show columns in t1;
+
diff --git a/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q b/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q
index 922beba..7065527 100644
---
a/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q +++ b/ql/src/test/queries/clientnegative/authorization_update_noupdatepriv.q @@ -5,12 +5,11 @@ set hive.security.authorization.enabled=true; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -- check update without update priv -create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc;; +create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); set user.name=user1; update auth_noupd set i = 0 where i > 0; diff --git a/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q b/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q index fb50a7d..307c4f6 100644 --- a/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q +++ b/ql/src/test/queries/clientnegative/authorization_uri_create_table1.q @@ -7,6 +7,6 @@ dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/a_uri_crtab1; dfs -touchz ${system:test.tmp.dir}/a_uri_crtab1/1.txt; dfs -chmod 555 ${system:test.tmp.dir}/a_uri_crtab1/1.txt; -create table t1(i int) location '${system:test.tmp.dir}/a_uri_crtab_ext'; +create table t1(i int) location '${system:test.tmp.dir}/a_uri_crtab1'; -- Attempt to create table with dir that does not have write permission should fail diff --git a/ql/src/test/queries/clientnegative/delete_non_acid_table.q b/ql/src/test/queries/clientnegative/delete_non_acid_table.q new file mode 100644 index 0000000..6ae82ff --- /dev/null +++ b/ql/src/test/queries/clientnegative/delete_non_acid_table.q @@ -0,0 +1,12 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; +set hive.enforce.bucketing=true; + +create table not_an_acid_table2(a int, b varchar(128)); + +insert into table not_an_acid_table2 select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; + +select a,b from not_an_acid_table2 order by a; + +delete from not_an_acid_table2 where b = '0ruyd6Y50JpdGRf6HqD'; diff --git a/ql/src/test/queries/clientnegative/delete_not_bucketed.q b/ql/src/test/queries/clientnegative/delete_not_bucketed.q new file mode 100644 index 0000000..80dffea --- /dev/null +++ b/ql/src/test/queries/clientnegative/delete_not_bucketed.q @@ -0,0 +1,7 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; + +create table acid_notbucketed(a int, b varchar(128)) stored as orc TBLPROPERTIES ('transactional'='true'); + +delete from acid_notbucketed where a = 3; diff --git a/ql/src/test/queries/clientnegative/delete_sorted.q b/ql/src/test/queries/clientnegative/delete_sorted.q new file mode 100644 index 0000000..fd8d579 --- /dev/null +++ b/ql/src/test/queries/clientnegative/delete_sorted.q @@ -0,0 +1,7 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; + +create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); + +delete from acid_insertsort where a = 3; diff --git a/ql/src/test/queries/clientnegative/insert_sorted.q 
b/ql/src/test/queries/clientnegative/insert_sorted.q new file mode 100644 index 0000000..18c942a --- /dev/null +++ b/ql/src/test/queries/clientnegative/insert_sorted.q @@ -0,0 +1,7 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; + +create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); + +insert into table acid_insertsort select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; diff --git a/ql/src/test/queries/clientnegative/insert_values_sorted.q b/ql/src/test/queries/clientnegative/insert_values_sorted.q new file mode 100644 index 0000000..260e2fb --- /dev/null +++ b/ql/src/test/queries/clientnegative/insert_values_sorted.q @@ -0,0 +1,7 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; + +create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); + +insert into table acid_insertsort values (1, 'abc'),(2, 'def'); diff --git a/ql/src/test/queries/clientnegative/update_no_such_table.q b/ql/src/test/queries/clientnegative/update_no_such_table.q index 522c46d..07239cf 100644 --- a/ql/src/test/queries/clientnegative/update_no_such_table.q +++ b/ql/src/test/queries/clientnegative/update_no_such_table.q @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; diff --git a/ql/src/test/queries/clientnegative/update_non_acid_table.q b/ql/src/test/queries/clientnegative/update_non_acid_table.q new file mode 100644 index 0000000..dd0b01e --- /dev/null +++ b/ql/src/test/queries/clientnegative/update_non_acid_table.q @@ -0,0 +1,12 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; +set hive.enforce.bucketing=true; + +create table not_an_acid_table(a int, b varchar(128)); + +insert into table not_an_acid_table select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; + +select a,b from not_an_acid_table order by a; + +update not_an_acid_table set b = 'fred' where b = '0ruyd6Y50JpdGRf6HqD'; diff --git a/ql/src/test/queries/clientnegative/update_not_bucketed.q b/ql/src/test/queries/clientnegative/update_not_bucketed.q new file mode 100644 index 0000000..8512fa7 --- /dev/null +++ b/ql/src/test/queries/clientnegative/update_not_bucketed.q @@ -0,0 +1,7 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; + +create table acid_notbucketed(a int, b varchar(128)) partitioned by (ds string) stored as orc TBLPROPERTIES ('transactional'='true'); + +update acid_notbucketed set b = 'fred' where a = 3; diff --git a/ql/src/test/queries/clientnegative/update_partition_col.q b/ql/src/test/queries/clientnegative/update_partition_col.q index 918d312..e9c60cc 100644 --- a/ql/src/test/queries/clientnegative/update_partition_col.q +++ b/ql/src/test/queries/clientnegative/update_partition_col.q @@ -1,8 +1,7 @@ set hive.support.concurrency=true; set 
hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; +create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); update foo set ds = 'fred'; diff --git a/ql/src/test/queries/clientnegative/update_sorted.q b/ql/src/test/queries/clientnegative/update_sorted.q new file mode 100644 index 0000000..917c3b5 --- /dev/null +++ b/ql/src/test/queries/clientnegative/update_sorted.q @@ -0,0 +1,7 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; + +create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); + +update acid_insertsort set b = 'fred' where b = 'bob'; diff --git a/ql/src/test/queries/clientpositive/acid_vectorization.q b/ql/src/test/queries/clientpositive/acid_vectorization.q index 804144a..3f386c9 100644 --- a/ql/src/test/queries/clientpositive/acid_vectorization.q +++ b/ql/src/test/queries/clientpositive/acid_vectorization.q @@ -1,11 +1,10 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; set hive.exec.dynamic.partition.mode=nonstrict; set hive.vectorized.execution.enabled=true; -CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC; +CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true'); insert into table acid_vectorized select cint, cstring1 from alltypesorc where cint is not null order by cint limit 10; set hive.vectorized.execution.enabled=true; insert into table acid_vectorized values (1, 'bar'); diff --git a/ql/src/test/queries/clientpositive/alter_partition_change_col.q b/ql/src/test/queries/clientpositive/alter_partition_change_col.q new file mode 100644 index 0000000..baabb9f --- /dev/null +++ b/ql/src/test/queries/clientpositive/alter_partition_change_col.q @@ -0,0 +1,59 @@ +SET hive.exec.dynamic.partition = true; +SET hive.exec.dynamic.partition.mode = nonstrict; + +-- SORT_QUERY_RESULTS + +create table alter_partition_change_col0 (c1 string, c2 string); +load data local inpath '../../data/files/dec.txt' overwrite into table alter_partition_change_col0; + +create table alter_partition_change_col1 (c1 string, c2 string) partitioned by (p1 string); + +insert overwrite table alter_partition_change_col1 partition (p1) + select c1, c2, 'abc' from alter_partition_change_col0 + union all + select c1, c2, null from alter_partition_change_col0; + +show partitions alter_partition_change_col1; +select * from alter_partition_change_col1; + +-- Change c2 to decimal(10,0) +alter table alter_partition_change_col1 change c2 c2 decimal(10,0); +alter table alter_partition_change_col1 partition (p1='abc') change c2 c2 decimal(10,0); +alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__') change c2 c2 decimal(10,0); +select * from alter_partition_change_col1; + +-- Change the column type at the table level. Table-level describe shows the new type, but the existing partition does not. 
+alter table alter_partition_change_col1 change c2 c2 decimal(14,4); +describe alter_partition_change_col1; +describe alter_partition_change_col1 partition (p1='abc'); +select * from alter_partition_change_col1; + +-- now change the column type of the existing partition +alter table alter_partition_change_col1 partition (p1='abc') change c2 c2 decimal(14,4); +describe alter_partition_change_col1 partition (p1='abc'); +select * from alter_partition_change_col1; + +-- change column for default partition value +alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__') change c2 c2 decimal(14,4); +describe alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__'); +select * from alter_partition_change_col1; + +-- Try out replace columns +alter table alter_partition_change_col1 partition (p1='abc') replace columns (c1 string); +describe alter_partition_change_col1; +describe alter_partition_change_col1 partition (p1='abc'); +select * from alter_partition_change_col1; +alter table alter_partition_change_col1 replace columns (c1 string); +describe alter_partition_change_col1; +select * from alter_partition_change_col1; + +-- Try add columns +alter table alter_partition_change_col1 add columns (c2 decimal(14,4)); +describe alter_partition_change_col1; +describe alter_partition_change_col1 partition (p1='abc'); +select * from alter_partition_change_col1; + +alter table alter_partition_change_col1 partition (p1='abc') add columns (c2 decimal(14,4)); +describe alter_partition_change_col1 partition (p1='abc'); +select * from alter_partition_change_col1; + diff --git a/ql/src/test/queries/clientpositive/alter_table_location.q b/ql/src/test/queries/clientpositive/alter_table_location.q new file mode 100644 index 0000000..94c3948 --- /dev/null +++ b/ql/src/test/queries/clientpositive/alter_table_location.q @@ -0,0 +1,5 @@ +drop table if exists hcat_altertable_16; +create table hcat_altertable_16(a int, b string) stored as textfile; +show table extended like hcat_altertable_16; +alter table hcat_altertable_16 set location 'file:${system:test.tmp.dir}/hcat_altertable_16'; +show table extended like hcat_altertable_16; diff --git a/ql/src/test/queries/clientpositive/annotate_stats_groupby.q b/ql/src/test/queries/clientpositive/annotate_stats_groupby.q index 1c0829d..854e401 100644 --- a/ql/src/test/queries/clientpositive/annotate_stats_groupby.q +++ b/ql/src/test/queries/clientpositive/annotate_stats_groupby.q @@ -1,4 +1,25 @@ set hive.stats.fetch.column.stats=true; +set hive.map.aggr.hash.percentmemory=0.0f; + +-- hash aggregation is disabled + +-- There are different cases for Group By depending on map/reduce side, hash aggregation, +-- grouping sets and column stats. If we don't have column stats, we just assume hash +-- aggregation is disabled. 
Following are the possible cases and rules for cardinality
+-- estimation
+
+-- MAP SIDE:
+-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+-- Case 3: column stats, hash aggregation, NO grouping sets - Min(numRows / 2, ndvProduct * parallelism)
+-- Case 4: column stats, hash aggregation, grouping sets - Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet)
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 6: column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+
+-- REDUCE SIDE:
+-- Case 7: NO column stats - numRows / 2
+-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet)
+-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
 create table if not exists loc_staging (
 state string,
@@ -29,71 +50,91 @@ from (
 select state as a, locid as b, count(*) as c
 ) sq1 group by a,c;
-analyze table loc_orc compute statistics for columns state,locid,zip,year;
+analyze table loc_orc compute statistics for columns state,locid,year;
--- only one distinct value in year column + 1 NULL value
--- map-side GBY: numRows: 8 (map-side will not do any reduction)
--- reduce-side GBY: numRows: 2
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 2
 explain select year from loc_orc group by year;
--- map-side GBY: numRows: 8
--- reduce-side GBY: numRows: 4
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 8
 explain select state,locid from loc_orc group by state,locid;
--- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 32
 explain select state,locid from loc_orc group by state,locid with cube;
--- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 8: column stats, grouping sets - cardinality = 24
 explain select state,locid from loc_orc group by state,locid with rollup;
--- map-side GBY numRows: 8 reduce-side GBY numRows: 4
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8
+-- Case 8: column stats, grouping sets - cardinality = 8
 explain select state,locid from loc_orc group by state,locid grouping sets((state));
--- map-side GBY numRows: 16 reduce-side GBY numRows: 8
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16
+-- Case 8: column stats, grouping sets - cardinality = 16
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
--- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 8: column stats, grouping sets - cardinality = 24
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
--- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 32
 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
-set hive.stats.map.parallelism=10;
+set hive.map.aggr.hash.percentmemory=0.5f;
+set mapred.max.split.size=80;
+-- map-side parallelism will be 10
--- map-side GBY: numRows: 80 (map-side will not do any reduction)
--- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
+-- Case 9: column stats, NO grouping sets - cardinality = 2
 explain select year from loc_orc group by year;
--- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7)
+-- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16
+-- Case 8: column stats, grouping sets - cardinality = 16
 explain select state,locid from loc_orc group by state,locid with cube;
+-- ndvProduct becomes 0 as zip does not have column stats
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select state,zip from loc_orc group by state,zip;
+
+set mapred.max.split.size=1000;
 set hive.stats.fetch.column.stats=false;
-set hive.stats.map.parallelism=1;
--- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
 explain select state,locid from loc_orc group by state,locid with cube;
--- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 7: NO column stats - cardinality = 12
 explain select state,locid from loc_orc group by state,locid with rollup;
--- map-side GBY numRows: 8 reduce-side GBY numRows: 4
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 8
+-- Case 7: NO column stats - cardinality = 4
 explain select state,locid from loc_orc group by state,locid grouping sets((state));
--- map-side GBY numRows: 16 reduce-side GBY numRows: 8
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 16
+-- Case 7: NO column stats - cardinality = 8
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid));
--- map-side GBY numRows: 24 reduce-side GBY numRows: 12
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24
+-- Case 7: NO column stats - cardinality = 12
 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),());
--- map-side GBY numRows: 32 reduce-side GBY numRows: 16
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),());
-set hive.stats.map.parallelism=10;
+set mapred.max.split.size=80;
--- map-side GBY: numRows: 80 (map-side will not do any reduction)
--- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2)
+-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8
+-- Case 7: NO column stats - cardinality = 4
 explain select year from loc_orc group by year;
--- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7)
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32
+-- Case 7: NO column stats - cardinality = 16
 explain select state,locid from loc_orc group by state,locid with cube;
diff --git a/ql/src/test/queries/clientpositive/annotate_stats_groupby2.q b/ql/src/test/queries/clientpositive/annotate_stats_groupby2.q
new file mode 100644
index 0000000..6e65577
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/annotate_stats_groupby2.q
@@ -0,0 +1,64 @@
+drop table location;
+
+-- There are different cases for Group By depending on map/reduce side, hash aggregation,
+-- grouping sets and column stats. If we don't have column stats, we just assume hash
+-- aggregation is disabled. Following are the possible cases and rules for cardinality
+-- estimation
+
+-- MAP SIDE:
+-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+-- Case 3: column stats, hash aggregation, NO grouping sets - Min(numRows / 2, ndvProduct * parallelism)
+-- Case 4: column stats, hash aggregation, grouping sets - Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet)
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - numRows
+-- Case 6: column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet
+
+-- REDUCE SIDE:
+-- Case 7: NO column stats - numRows / 2
+-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet)
+-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct)
+
+create table location (state string, country string, votes bigint);
+load data local inpath "../../data/files/location.txt" overwrite into table location;
+
+analyze table location compute statistics;
+analyze table location compute statistics for columns state, country;
+
+set mapred.max.split.size=50;
+set hive.map.aggr.hash.percentmemory=0.5f;
+set hive.stats.fetch.column.stats=false;
+
+-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 20
+-- Case 7: NO column stats - cardinality = 10
+explain select state, country from location group by state, country;
+
+-- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 80
+-- Case 7: NO column stats - cardinality = 40
+explain select state, country from location group by state, country with cube;
+
+set hive.stats.fetch.column.stats=true;
+-- parallelism = 4
+
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 8
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select state, country from location group by state, country;
+
+-- column stats for votes is missing, so ndvProduct becomes 0 and will be set to numRows / 2
+-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 10
+-- Case 9: column stats, NO grouping sets - cardinality = 5
+explain select state, votes from location group by state, votes;
+
+-- Case 4: column stats, hash aggregation, grouping sets - cardinality = 32
+-- Case 8: column stats, grouping sets - cardinality = 8
+explain select state, country from location group by state, country with cube;
+
+set hive.map.aggr.hash.percentmemory=0.0f;
+-- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 20
+-- Case 9: column stats, NO grouping sets - cardinality = 2
+explain select state, country from location group by state, country;
+
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 80
+-- Case 8: column stats, grouping sets - cardinality = 8
+explain select state, country from location group by state, country with cube;
+
+drop table location;
\ No newline at end of file
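
The nine cases documented in these two .q files are plain arithmetic over four inputs: numRows, the NDV product of the grouping keys, the map-side parallelism, and the number of grouping sets. The sketch below reproduces the figures the comments claim for annotate_stats_groupby2.q, assuming (as those comments imply) numRows = 20 for the location table, a combined NDV of 2 for (state, country), parallelism = 4, and 4 grouping sets for a CUBE over two keys. It only illustrates the documented rules, not Hive's actual estimator code; the class name GroupByCardinalitySketch is invented.

// Worked example of the Case 1-9 rules listed in the comments above.
public class GroupByCardinalitySketch {

  // Map-side estimate: Cases 1-6.
  static long mapSide(long numRows, long ndvProduct, int parallelism,
      int groupingSets, boolean colStats, boolean hashAggr) {
    if (colStats && hashAggr) {       // Cases 3 and 4
      return Math.min((numRows * groupingSets) / 2,
          ndvProduct * parallelism * groupingSets);
    }
    return numRows * groupingSets;    // Cases 1, 2, 5 and 6
  }

  // Reduce-side estimate: Cases 7-9; numRows here is the map-side output.
  static long reduceSide(long numRows, long ndvProduct, int groupingSets,
      boolean colStats) {
    if (!colStats) {
      return numRows / 2;                                // Case 7
    }
    return Math.min(numRows, ndvProduct * groupingSets); // Cases 8 and 9
  }

  public static void main(String[] args) {
    // "group by state, country" with column stats and hash aggregation:
    long map1 = mapSide(20, 2, 4, 1, true, true);   // Case 3 -> 8
    long red1 = reduceSide(map1, 2, 1, true);       // Case 9 -> 2
    // the same query "with cube" (4 grouping sets):
    long map2 = mapSide(20, 2, 4, 4, true, true);   // Case 4 -> 32
    long red2 = reduceSide(map2, 2, 4, true);       // Case 8 -> 8
    System.out.println(map1 + "/" + red1 + " and " + map2 + "/" + red2);
  }
}
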
+-- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 80 +-- Case 8: column stats, grouping sets - cardinality = 8 +explain select state, country from location group by state, country with cube; + +drop table location; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/annotate_stats_part.q b/ql/src/test/queries/clientpositive/annotate_stats_part.q index f25776a..fcfe566 100644 --- a/ql/src/test/queries/clientpositive/annotate_stats_part.q +++ b/ql/src/test/queries/clientpositive/annotate_stats_part.q @@ -65,6 +65,9 @@ explain select zip from loc_orc; -- basicStatState: COMPLETE colStatState: PARTIAL explain select state from loc_orc; +-- basicStatState: COMPLETE colStatState: COMPLETE +explain select year from loc_orc; + -- column statistics for __HIVE_DEFAULT_PARTITION__ are not supported yet. Hence colStatState reports PARTIAL -- basicStatState: COMPLETE colStatState: PARTIAL explain select state,locid from loc_orc; diff --git a/ql/src/test/queries/clientpositive/authorization_delete.q b/ql/src/test/queries/clientpositive/authorization_delete.q index ebd0315..d96e6ab 100644 --- a/ql/src/test/queries/clientpositive/authorization_delete.q +++ b/ql/src/test/queries/clientpositive/authorization_delete.q @@ -4,13 +4,12 @@ set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.Sessi set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; set user.name=user1; -- current user has been set (a comment line before the set cmd results in a parse error!!) -CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc; +CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); -- grant delete privilege to another user GRANT DELETE ON t_auth_del TO USER userWIns; diff --git a/ql/src/test/queries/clientpositive/authorization_delete_own_table.q b/ql/src/test/queries/clientpositive/authorization_delete_own_table.q index 19dbbeb..7abdc12 100644 --- a/ql/src/test/queries/clientpositive/authorization_delete_own_table.q +++ b/ql/src/test/queries/clientpositive/authorization_delete_own_table.q @@ -5,12 +5,11 @@ set hive.security.authorization.enabled=true; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; set user.name=user1; -create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc;; +create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); delete from auth_noupd where i > 0; set user.name=hive_admin_user; diff --git a/ql/src/test/queries/clientpositive/authorization_grant_option_role.q b/ql/src/test/queries/clientpositive/authorization_grant_option_role.q new file mode 100644 index 0000000..ea0b51b --- /dev/null +++ b/ql/src/test/queries/clientpositive/authorization_grant_option_role.q @@ -0,0 +1,28 @@ +set hive.test.authz.sstd.hs2.mode=true; +set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; +set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; + +set user.name=hive_admin_user; +set role admin; +create role r1; +grant role r1 to user r1user; + +set user.name=user1; +CREATE TABLE t1(i int);
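+-- (in SQL standard based authorization, the creator of a table becomes its owner and is expected to receive all privileges on it with grant option, which is what the GRANT below hands on to role r1)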
+ +-- all privileges should have been set for the user + +GRANT ALL ON t1 TO ROLE r1 WITH GRANT OPTION; + +set user.name=r1user; +-- check if a user belonging to role r1 can grant privileges to others +GRANT ALL ON t1 TO USER user3; + +set user.name=hive_admin_user; +set role admin; +-- check privileges on table +show grant on table t1; + +-- check if drop role removes privileges for that role +drop role r1; +show grant on table t1; diff --git a/ql/src/test/queries/clientpositive/authorization_update.q b/ql/src/test/queries/clientpositive/authorization_update.q index 18ceadb..3601e67 100644 --- a/ql/src/test/queries/clientpositive/authorization_update.q +++ b/ql/src/test/queries/clientpositive/authorization_update.q @@ -4,13 +4,12 @@ set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.Sessi set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; set user.name=user1; -- current user has been set (a comment line before the set cmd results in a parse error!!) -CREATE TABLE t_auth_up(i int) clustered by (i) into 2 buckets stored as orc; +CREATE TABLE t_auth_up(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); CREATE TABLE t_select(i int); GRANT ALL ON TABLE t_select TO ROLE public; diff --git a/ql/src/test/queries/clientpositive/authorization_update_own_table.q b/ql/src/test/queries/clientpositive/authorization_update_own_table.q index 46beb49..18a643c 100644 --- a/ql/src/test/queries/clientpositive/authorization_update_own_table.q +++ b/ql/src/test/queries/clientpositive/authorization_update_own_table.q @@ -5,12 +5,11 @@ set hive.security.authorization.enabled=true; set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; set user.name=user1; -create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc;; +create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); update auth_noupd set i = 0 where i > 0; set user.name=hive_admin_user; diff --git a/ql/src/test/queries/clientpositive/avro_charvarchar.q b/ql/src/test/queries/clientpositive/avro_charvarchar.q new file mode 100644 index 0000000..f36e143 --- /dev/null +++ b/ql/src/test/queries/clientpositive/avro_charvarchar.q @@ -0,0 +1,27 @@ +DROP TABLE avro_charvarchar_staging; +DROP TABLE avro_charvarchar; + +CREATE TABLE avro_charvarchar_staging ( + cchar char(5), + cvarchar varchar(10), + m1 map<string, varchar(10)>, + l1 array<char(4)>, + st1 struct<c1:char(5), c2:varchar(10)> +) ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +COLLECTION ITEMS TERMINATED BY ',' +MAP KEYS TERMINATED BY ':'; + +CREATE TABLE avro_charvarchar ( + cchar char(5), + cvarchar varchar(10), + m1 map<string, varchar(10)>, + l1 array<char(4)>, + st1 struct<c1:char(5), c2:varchar(10)> +) STORED AS AVRO; + +LOAD DATA LOCAL INPATH '../../data/files/avro_charvarchar.txt' OVERWRITE INTO TABLE avro_charvarchar_staging; + +INSERT OVERWRITE TABLE avro_charvarchar SELECT * FROM avro_charvarchar_staging; + +SELECT * FROM avro_charvarchar; diff --git a/ql/src/test/queries/clientpositive/cbo_correctness.q b/ql/src/test/queries/clientpositive/cbo_correctness.q new file mode 100644 index 0000000..f7f0722 --- /dev/null +++ b/ql/src/test/queries/clientpositive/cbo_correctness.q @@ -0,0 +1,462 @@ +set hive.cbo.enable=true; +set hive.exec.check.crossproducts=false; +
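+-- hive.cbo.enable switches on the cost based optimizer; the analyze statements further down collect the basic and column statistics that its cost estimates rely on.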
+drop table if exists t1; +drop table if exists t2; +drop table if exists t3; + +create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE; +create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE; +create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE; + +load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014'); +load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014'); +load data local inpath '../../data/files/cbo_t3.txt' into table t3; + +CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +); + +LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part; + +DROP TABLE lineitem; +CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|'; + +LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem; + +create table src_cbo as select * from src; + + +set hive.stats.dbclass=jdbc:derby; +analyze table t1 partition (dt) compute statistics; +analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean; +analyze table t2 partition (dt) compute statistics; +analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean; +analyze table t3 compute statistics; +analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean; +analyze table src_cbo compute statistics; +analyze table src_cbo compute statistics for columns; +analyze table part compute statistics; +analyze table part compute statistics for columns; +analyze table lineitem compute statistics; +analyze table lineitem compute statistics for columns; + +set hive.stats.fetch.column.stats=true; +set hive.auto.convert.join=false; + +-- 1. Test Select + TS +select * from t1; +select * from t1 as t1; +select * from t1 as t2; + +select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1; + +-- 2.
Test Select + TS + FIL +select * from t1 where t1.c_int >= 0; +select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; + +select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; + +-- 3. Test Select + Select + TS + FIL +select * from (select * from t1 where t1.c_int >= 0) as t1; +select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1; +select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1; +select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1; + +select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0; +select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100; +select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100; + +select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0; +select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0; + +-- 4.
Test Select + Join + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key; +select t1.key from t1 join t3; +select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1; +select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key; +select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key; +select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key; + +select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; +select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; +select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p; +select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key; +select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key; + +select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; +select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; + +select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; +select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; + +select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key; +select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a; + +-- 5. 
Test Select + Join + FIL + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); +select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); +select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); +select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0); + +select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0); + +select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0); + + + +select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0); + +select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select 
key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + + + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + + + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + +select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0); + + +-- 6. 
Test Select + TS + Join + Fil + GB + GB Having +select * from t1 group by c_int; +select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key; +select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c; + +-- 7. 
Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from t1 group by c_int limit 1; +select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1; +select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1; +select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5; +select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5; + +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5; + +-- 8. Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1; +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int; +select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1; +select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1; +select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1; +select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1; +select count(c_int) as a, avg(c_float), key from t1 group by key; +select count(distinct c_int) as a, avg(c_float) from t1 group by c_float; +select count(distinct c_int) as a, avg(c_float) from t1 group by c_int; +select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int; + +-- 9. 
Test Windowing Functions +select count(c_int) over() from t1; +select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1; +select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1; +select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1; +select 1+sum(c_int) over() from t1; +select sum(c_int)+sum(sum(c_int)) over() from t1; +select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1; +select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1; + +-- 10. 
Test views +create view v1 as select c_int, value, c_boolean, dt from t1; +create view v2 as select c_int, value from t2; + +select value from v1 where c_boolean=false; +select max(c_int) from v1 group by (c_boolean); + +select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int; +select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int; + +select count(*) from v1 a join v1 b on a.value = b.value; + +create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean; + +select count(val) from v3 where val != '1'; +with q1 as ( select key from t1 where key = '1') +select count(*) from q1; + +with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 ; + +create view v4 as +with q1 as ( select key,c_int from t1 where key = '1') +select * from q1 +; + +with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a; + +with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int; + + +drop view v1; +drop view v2; +drop view v3; +drop view v4; + +-- 11. Union All +select * from t1 union all select * from t2 order by key; +select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key; +select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key; + +-- 12. 
SemiJoin +select t1.c_int from t1 left semi join t2 on t1.key=t2.key; +select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0); +select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0); +select * from (select t3.c_int, t1.c, b from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0); +select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0); +select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0); +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a; +select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a; + +-- 13. null expr in select list +select null from t3; + +-- 14. unary operator +select key from t1 where c_int = -6 or c_int = +6; + +-- 15. 
query referencing only partition columns +select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' ; + +-- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) +; + +-- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +; + +-- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +; + +-- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +; + +-- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +; + +-- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) + order by b.p_mfgr +; + +-- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) + order by b.p_mfgr +; + +-- 17. SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +; + +-- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +; + +-- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +; + +-- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +; + +-- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +; + +-- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +; + +-- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +; + +-- 19. 
SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +; + +select * from cv1 +; + +-- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +; + +-- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +; + +-- 20. get stats with empty partition list +select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true; + + diff --git a/ql/src/test/queries/clientpositive/constantPropagateForSubQuery.q b/ql/src/test/queries/clientpositive/constantPropagateForSubQuery.q new file mode 100644 index 0000000..149a290 --- /dev/null +++ b/ql/src/test/queries/clientpositive/constantPropagateForSubQuery.q @@ -0,0 +1,4 @@ +explain extended + select * from (select a.key as ak, a.value as av, b.key as bk, b.value as bv from src a join src1 b where a.key = '429' ) c; + + select * from (select a.key as ak, a.value as av, b.key as bk, b.value as bv from src a join src1 b where a.key = '429' ) c; diff --git a/ql/src/test/queries/clientpositive/create_func1.q b/ql/src/test/queries/clientpositive/create_func1.q index ad924d3..6f5f7f2 100644 --- a/ql/src/test/queries/clientpositive/create_func1.q +++ b/ql/src/test/queries/clientpositive/create_func1.q @@ -2,11 +2,16 @@ -- qtest_get_java_boolean should already be created during test initialization select qtest_get_java_boolean('true'), qtest_get_java_boolean('false') from src limit 1; +describe function extended qtest_get_java_boolean; + create database mydb; create function mydb.func1 as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper'; show functions mydb.func1; +describe function extended mydb.func1; + + select mydb.func1('abc') from src limit 1; drop function mydb.func1; diff --git a/ql/src/test/queries/clientpositive/decimal_udf.q b/ql/src/test/queries/clientpositive/decimal_udf.q index 591c210..f9940d9 100644 --- a/ql/src/test/queries/clientpositive/decimal_udf.q +++ b/ql/src/test/queries/clientpositive/decimal_udf.q @@ -39,6 +39,9 @@ SELECT key - '1.0' FROM DECIMAL_UDF; EXPLAIN SELECT key * key FROM DECIMAL_UDF; SELECT key * key FROM DECIMAL_UDF; +EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0; +SELECT key, value FROM DECIMAL_UDF where key * value > 0; + EXPLAIN SELECT key * value FROM DECIMAL_UDF; SELECT key * value FROM DECIMAL_UDF; diff --git a/ql/src/test/queries/clientpositive/decimal_udf2.q b/ql/src/test/queries/clientpositive/decimal_udf2.q new file mode 100644 index 0000000..d4f6084 --- /dev/null +++ b/ql/src/test/queries/clientpositive/decimal_udf2.q @@ -0,0 +1,30 @@ +DROP TABLE IF EXISTS DECIMAL_UDF2; + +CREATE TABLE DECIMAL_UDF2 (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF2; + +EXPLAIN +SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10; + +SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10; +
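+-- acos and asin are defined only on [-1, 1], so with key = 10 they are expected to return NULL here, while the remaining functions return ordinary double results. +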
+EXPLAIN +SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10; + +SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10; + +DROP TABLE IF EXISTS DECIMAL_UDF2; diff --git a/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q b/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q index 80a5991..9110dcc 100644 --- a/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q +++ b/ql/src/test/queries/clientpositive/delete_all_non_partitioned.q @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_danp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10; diff --git a/ql/src/test/queries/clientpositive/delete_all_partitioned.q b/ql/src/test/queries/clientpositive/delete_all_partitioned.q index b848319..f082b6d 100644 --- a/ql/src/test/queries/clientpositive/delete_all_partitioned.q +++ b/ql/src/test/queries/clientpositive/delete_all_partitioned.q @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; +create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dap partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10; insert into table acid_dap partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 1000 order by cint limit 10; diff --git a/ql/src/test/queries/clientpositive/delete_orig_table.q b/ql/src/test/queries/clientpositive/delete_orig_table.q index e1759f6..fd23f4b 100644 --- a/ql/src/test/queries/clientpositive/delete_orig_table.q +++ b/ql/src/test/queries/clientpositive/delete_orig_table.q @@ -1,6 +1,5 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/delete_orig_table; @@ -18,7 +17,7 @@ create table acid_dot( ctimestamp1 TIMESTAMP, ctimestamp2 TIMESTAMP, cboolean1 BOOLEAN, - cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/delete_orig_table'; + cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/delete_orig_table' TBLPROPERTIES ('transactional'='true'); select count(*) from acid_dot; diff --git a/ql/src/test/queries/clientpositive/delete_tmp_table.q b/ql/src/test/queries/clientpositive/delete_tmp_table.q index 5563b3c..eb6c095 100644 --- a/ql/src/test/queries/clientpositive/delete_tmp_table.q +++ b/ql/src/test/queries/clientpositive/delete_tmp_table.q @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set
hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dtt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; diff --git a/ql/src/test/queries/clientpositive/delete_where_no_match.q b/ql/src/test/queries/clientpositive/delete_where_no_match.q index 8ebff45..8ed979d 100644 --- a/ql/src/test/queries/clientpositive/delete_where_no_match.q +++ b/ql/src/test/queries/clientpositive/delete_where_no_match.q @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dwnm select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; diff --git a/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q b/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q index b37ec80..dac5375 100644 --- a/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q +++ b/ql/src/test/queries/clientpositive/delete_where_non_partitioned.q @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc; +create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dwnp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10; diff --git a/ql/src/test/queries/clientpositive/delete_where_partitioned.q b/ql/src/test/queries/clientpositive/delete_where_partitioned.q index cce89f4..f84f26a 100644 --- a/ql/src/test/queries/clientpositive/delete_where_partitioned.q +++ b/ql/src/test/queries/clientpositive/delete_where_partitioned.q @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; +create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dwp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10; insert into table acid_dwp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > -10000000 order by cint limit 10; diff --git 
a/ql/src/test/queries/clientpositive/delete_whole_partition.q b/ql/src/test/queries/clientpositive/delete_whole_partition.q index 2cb3e74..8228a32 100644 --- a/ql/src/test/queries/clientpositive/delete_whole_partition.q +++ b/ql/src/test/queries/clientpositive/delete_whole_partition.q @@ -1,9 +1,8 @@ set hive.support.concurrency=true; set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; -set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; set hive.enforce.bucketing=true; -create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc; +create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true'); insert into table acid_dwhp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10; insert into table acid_dwhp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > -10000000 order by cint limit 10; diff --git a/ql/src/test/queries/clientpositive/drop_table_purge.q b/ql/src/test/queries/clientpositive/drop_table_purge.q new file mode 100644 index 0000000..f094a5b --- /dev/null +++ b/ql/src/test/queries/clientpositive/drop_table_purge.q @@ -0,0 +1,4 @@ +SET hive.metastore.batch.retrieve.max=1; +CREATE TABLE IF NOT EXISTS temp(col STRING); + +DROP TABLE temp PURGE; diff --git a/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q b/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q index 67c5249..5a7f113 100644 --- a/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q +++ b/ql/src/test/queries/clientpositive/dynamic_partition_pruning_2.q @@ -19,6 +19,9 @@ load data local inpath '../../data/files/agg_01-p1.txt' into table agg_01 partit load data local inpath '../../data/files/agg_01-p2.txt' into table agg_01 partition (dim_shops_id=2); load data local inpath '../../data/files/agg_01-p3.txt' into table agg_01 partition (dim_shops_id=3); +analyze table dim_shops compute statistics; +analyze table agg_01 partition (dim_shops_id) compute statistics; + select * from dim_shops; select * from agg_01; @@ -40,6 +43,73 @@ d1.label in ('foo', 'bar') GROUP BY d1.label ORDER BY d1.label; +set hive.tez.dynamic.partition.pruning.max.event.size=1000000; +set hive.tez.dynamic.partition.pruning.max.data.size=1; + +EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + +SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + +EXPLAIN SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id; + +SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id; + +EXPLAIN SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1; + +SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1; + +set hive.tez.dynamic.partition.pruning.max.event.size=1; +set hive.tez.dynamic.partition.pruning.max.data.size=1000000; + +EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') 
+GROUP BY d1.label +ORDER BY d1.label; + +SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label; + +set hive.tez.dynamic.partition.pruning.max.event.size=100000; +set hive.tez.dynamic.partition.pruning.max.data.size=1000000; + EXPLAIN SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' UNION ALL @@ -47,4 +117,4 @@ SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'; SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'foo' UNION ALL -SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'; \ No newline at end of file +SELECT amount FROM agg_01, dim_shops WHERE dim_shops_id = id AND label = 'bar'; diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q index ce5a5b7..78816ae 100644 --- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q @@ -108,6 +108,13 @@ set hive.optimize.sort.dynamic.partition=false; explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i; set hive.optimize.sort.dynamic.partition=true; explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i; +explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27; + +set hive.optimize.sort.dynamic.partition=false; +explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t; +set hive.optimize.sort.dynamic.partition=true; +-- tests for HIVE-8162, only partition column 't' should be in last RS operator +explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t; set hive.optimize.sort.dynamic.partition=false; insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i; diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q index 8c3c68f..e459583 100644 --- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q +++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q @@ -102,6 +102,13 @@ set hive.optimize.sort.dynamic.partition=false; explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i; set hive.optimize.sort.dynamic.partition=true; explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i; +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27; + +set hive.optimize.sort.dynamic.partition=false; +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t; +set hive.optimize.sort.dynamic.partition=true; +-- tests for HIVE-8162, only partition column 't' should be in last RS operator 
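+-- (with the optimization enabled, the final ReduceSink is expected to be keyed only on the dynamic partition column 't', so each reducer writes a single partition rather than carrying the group-by columns si, i, b, f through as extra keys)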
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
index ce5a5b7..78816ae 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_opt_vectorization.q
@@ -108,6 +108,13 @@ set hive.optimize.sort.dynamic.partition=false;
 explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
 set hive.optimize.sort.dynamic.partition=true;
 explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
+explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27;
+
+set hive.optimize.sort.dynamic.partition=false;
+explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t;
+set hive.optimize.sort.dynamic.partition=true;
+-- tests for HIVE-8162, only partition column 't' should be in last RS operator
+explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t;
 
 set hive.optimize.sort.dynamic.partition=false;
 insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i;
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
index 8c3c68f..e459583 100644
--- a/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization.q
@@ -102,6 +102,13 @@ set hive.optimize.sort.dynamic.partition=false;
 explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
 set hive.optimize.sort.dynamic.partition=true;
 explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27;
+
+set hive.optimize.sort.dynamic.partition=false;
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t;
+set hive.optimize.sort.dynamic.partition=true;
+-- tests for HIVE-8162, only partition column 't' should be in last RS operator
+explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t;
 
 set hive.optimize.sort.dynamic.partition=false;
 insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i;
diff --git a/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
new file mode 100644
index 0000000..70c795d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/dynpart_sort_optimization2.q
@@ -0,0 +1,246 @@
+set hive.optimize.sort.dynamic.partition=true;
+set hive.exec.dynamic.partition=true;
+set hive.exec.max.dynamic.partitions=1000;
+set hive.exec.max.dynamic.partitions.pernode=1000;
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.enforce.bucketing=false;
+set hive.enforce.sorting=false;
+set hive.exec.submitviachild=true;
+set hive.exec.submit.local.task.via.child=true;
+
+drop table ss;
+drop table ss_orc;
+drop table ss_part;
+drop table ss_part_orc;
+
+create table ss (
+ss_sold_date_sk int,
+ss_net_paid_inc_tax float,
+ss_net_profit float);
+
+create table ss_part (
+ss_net_paid_inc_tax float,
+ss_net_profit float)
+partitioned by (ss_sold_date_sk int);
+
+load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss;
+
+explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  group by ss_sold_date_sk,
+  ss_net_paid_inc_tax,
+  ss_net_profit
+  distribute by ss_sold_date_sk;
+
+insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  group by ss_sold_date_sk,
+  ss_net_paid_inc_tax,
+  ss_net_profit
+  distribute by ss_sold_date_sk;
+
+desc formatted ss_part partition(ss_sold_date_sk=2452617);
+select * from ss_part where ss_sold_date_sk=2452617;
+
+desc formatted ss_part partition(ss_sold_date_sk=2452638);
+select * from ss_part where ss_sold_date_sk=2452638;
+
+explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  distribute by ss_sold_date_sk;
+
+insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  distribute by ss_sold_date_sk;
+
+desc formatted ss_part partition(ss_sold_date_sk=2452617);
+select * from ss_part where ss_sold_date_sk=2452617;
+
+desc formatted ss_part partition(ss_sold_date_sk=2452638);
+select * from ss_part where ss_sold_date_sk=2452638;
+
+set hive.optimize.sort.dynamic.partition=false;
+-- SORT DYNAMIC PARTITION DISABLED
+
+explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  group by ss_sold_date_sk,
+  ss_net_paid_inc_tax,
+  ss_net_profit
+  distribute by ss_sold_date_sk;
+
+insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  group by ss_sold_date_sk,
+  ss_net_paid_inc_tax,
+  ss_net_profit
+  distribute by ss_sold_date_sk;
+
+desc formatted ss_part partition(ss_sold_date_sk=2452617);
+select * from ss_part where ss_sold_date_sk=2452617;
+
+desc formatted ss_part partition(ss_sold_date_sk=2452638);
+select * from ss_part where ss_sold_date_sk=2452638;
+
+explain insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  distribute by ss_sold_date_sk;
+
+insert overwrite table ss_part partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  distribute by ss_sold_date_sk;
+
+desc formatted ss_part partition(ss_sold_date_sk=2452617);
+select * from ss_part where ss_sold_date_sk=2452617;
+
+desc formatted ss_part partition(ss_sold_date_sk=2452638);
+select * from ss_part where ss_sold_date_sk=2452638;
+
+set hive.vectorized.execution.enabled=true;
+-- VECTORIZATION IS ENABLED
+
+create table ss_orc (
+ss_sold_date_sk int,
+ss_net_paid_inc_tax float,
+ss_net_profit float) stored as orc;
+
+create table ss_part_orc (
+ss_net_paid_inc_tax float,
+ss_net_profit float)
+partitioned by (ss_sold_date_sk int) stored as orc;
+
+insert overwrite table ss_orc select * from ss;
+
+drop table ss;
+drop table ss_part;
+
+explain insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss_orc
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  group by ss_sold_date_sk,
+  ss_net_paid_inc_tax,
+  ss_net_profit
+  distribute by ss_sold_date_sk;
+
+insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss_orc
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  group by ss_sold_date_sk,
+  ss_net_paid_inc_tax,
+  ss_net_profit
+  distribute by ss_sold_date_sk;
+
+desc formatted ss_part_orc partition(ss_sold_date_sk=2452617);
+select * from ss_part_orc where ss_sold_date_sk=2452617;
+
+desc formatted ss_part_orc partition(ss_sold_date_sk=2452638);
+select * from ss_part_orc where ss_sold_date_sk=2452638;
+
+explain insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss_orc
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  distribute by ss_sold_date_sk;
+
+insert overwrite table ss_part_orc partition (ss_sold_date_sk)
+select ss_net_paid_inc_tax,
+  ss_net_profit,
+  ss_sold_date_sk
+  from ss_orc
+  where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638
+  distribute by ss_sold_date_sk;
+
+desc formatted ss_part_orc partition(ss_sold_date_sk=2452617);
+select * from ss_part_orc where ss_sold_date_sk=2452617;
+
+desc formatted ss_part_orc partition(ss_sold_date_sk=2452638);
+select * from ss_part_orc where ss_sold_date_sk=2452638;
+
+drop table ss_orc;
+drop table ss_part_orc;
+
+drop table if exists hive13_dp1;
+create table if not exists hive13_dp1 (
+  k1 int,
+  k2 int
+)
+PARTITIONED BY(`day` string)
+STORED AS ORC;
+
+set hive.optimize.sort.dynamic.partition=false;
+explain insert overwrite table `hive13_dp1` partition(`day`)
+select
+  key k1,
+  count(value) k2,
+  "day" `day`
+from src
+group by "day", key;
+
+insert overwrite table `hive13_dp1` partition(`day`)
+select
+  key k1,
+  count(value) k2,
+  "day" `day`
+from src
+group by "day", key;
+select * from hive13_dp1 limit 5;
+
+set hive.optimize.sort.dynamic.partition=true;
+explain insert overwrite table `hive13_dp1` partition(`day`)
+select
+  key k1,
+  count(value) k2,
+  "day" `day`
+from src
+group by "day", key;
+
+insert overwrite table `hive13_dp1` partition(`day`)
+select
+  key k1,
+  count(value) k2,
+  "day" `day`
+from src
+group by "day", key;
+select * from hive13_dp1 limit 5;
+
+drop table hive13_dp1;
diff --git a/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q b/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q
new file mode 100644
index 0000000..c544589
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/insert_acid_dynamic_partition.q
@@ -0,0 +1,10 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+create table acid_dynamic(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
+
+insert into table acid_dynamic partition (ds) select cint, cast(cstring1 as varchar(128)), cstring2 from alltypesorc where cint is not null and cint < 0 order by cint limit 5;
+
+select * from acid_dynamic order by a,b;
diff --git a/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q b/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
new file mode 100644
index 0000000..a29b1e7
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/insert_acid_not_bucketed.q
@@ -0,0 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+
+create table acid_notbucketed(a int, b varchar(128)) stored as orc;
+
+insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
+
+select * from acid_notbucketed;
diff --git a/ql/src/test/queries/clientpositive/insert_into1.q b/ql/src/test/queries/clientpositive/insert_into1.q
index edc65a4..f19506a 100644
--- a/ql/src/test/queries/clientpositive/insert_into1.q
+++ b/ql/src/test/queries/clientpositive/insert_into1.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=true;
 DROP TABLE insert_into1;
 
 CREATE TABLE insert_into1 (key int, value string);
@@ -7,14 +8,18 @@ INSERT INTO TABLE insert_into1 SELECT * from src LIMIT 100;
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
 ) t;
-
+explain
+select count(*) from insert_into1;
+select count(*) from insert_into1;
 EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100;
 INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100;
 
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
 ) t;
+explain SELECT COUNT(*) FROM insert_into1;
+select count(*) from insert_into1;
 
 EXPLAIN INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10;
 INSERT OVERWRITE TABLE insert_into1 SELECT * FROM src LIMIT 10;
@@ -22,5 +27,10 @@ SELECT SUM(HASH(c)) FROM (
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into1
 ) t;
+explain
+SELECT COUNT(*) FROM insert_into1;
+select count(*) from insert_into1;
 
 DROP TABLE insert_into1;
+
+set hive.compute.query.using.stats=false;
diff --git a/ql/src/test/queries/clientpositive/insert_into2.q b/ql/src/test/queries/clientpositive/insert_into2.q
index 0cce958..1cbe391 100644
--- a/ql/src/test/queries/clientpositive/insert_into2.q
+++ b/ql/src/test/queries/clientpositive/insert_into2.q
@@ -1,3 +1,4 @@
+set hive.compute.query.using.stats=true;
 DROP TABLE insert_into2;
 
 CREATE TABLE insert_into2 (key int, value string)
@@ -5,7 +6,12 @@ CREATE TABLE insert_into2 (key int, value string)
     PARTITIONED BY (ds string);
 
 EXPLAIN INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src LIMIT 100;
 INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100;
+explain
+select count (*) from insert_into2 where ds = '1';
+select count (*) from insert_into2 where ds = '1';
 INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100;
+explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1';
 SELECT COUNT(*) FROM insert_into2 WHERE ds='1';
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
@@ -19,6 +25,9 @@ INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
 ) t;
+explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2';
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2';
 
 EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
     SELECT * FROM src LIMIT 50;
@@ -27,5 +36,11 @@ INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
 SELECT SUM(HASH(c)) FROM (
     SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM insert_into2
 ) t;
+explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2';
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2';
+
 DROP TABLE insert_into2;
+
+set hive.compute.query.using.stats=false;
diff --git a/ql/src/test/queries/clientpositive/insert_orig_table.q b/ql/src/test/queries/clientpositive/insert_orig_table.q
index 2c6df88..c38bd5a 100644
--- a/ql/src/test/queries/clientpositive/insert_orig_table.q
+++ b/ql/src/test/queries/clientpositive/insert_orig_table.q
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_iot(
@@ -15,7 +14,7 @@ create table acid_iot(
         ctimestamp1 TIMESTAMP,
         ctimestamp2 TIMESTAMP,
         cboolean1 BOOLEAN,
-        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc;
+        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_iot;
diff --git a/ql/src/test/queries/clientpositive/insert_update_delete.q b/ql/src/test/queries/clientpositive/insert_update_delete.q
index 34350df..8dbb77c 100644
--- a/ql/src/test/queries/clientpositive/insert_update_delete.q
+++ b/ql/src/test/queries/clientpositive/insert_update_delete.q
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_iud select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10;
diff --git a/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q b/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q
new file mode 100644
index 0000000..fc0cb10
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/insert_values_acid_not_bucketed.q
@@ -0,0 +1,9 @@
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.enforce.bucketing=true;
+
+create table acid_notbucketed(a int, b varchar(128)) stored as orc;
+
+insert into table acid_notbucketed values (1, 'abc'), (2, 'def');
+
+select * from acid_notbucketed;
diff --git a/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q b/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
index bde2e71..71e0e73 100644
--- a/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
+++ b/ql/src/test/queries/clientpositive/insert_values_dynamic_partitioned.q
@@ -1,12 +1,11 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table ivdp(i int,
                  de decimal(5,2),
-                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc;
+                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table ivdp partition (ds) values
     (1, 109.23, 'and everywhere that mary went', 'today'),
diff --git a/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q b/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
index 9d57f23..d0e7b0f 100644
--- a/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/insert_values_non_partitioned.q
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_ivnp(ti tinyint,
@@ -15,7 +14,7 @@ create table acid_ivnp(ti tinyint,
                  b boolean,
                  s string,
                  vc varchar(128),
-                 ch char(12)) clustered by (i) into 2 buckets stored as orc;
+                 ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_ivnp values
     (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', true, 'mary had a little lamb', 'ring around the rosie', 'red'),
diff --git a/ql/src/test/queries/clientpositive/insert_values_orig_table.q b/ql/src/test/queries/clientpositive/insert_values_orig_table.q
index d3d68d2..8fef549 100644
--- a/ql/src/test/queries/clientpositive/insert_values_orig_table.q
+++ b/ql/src/test/queries/clientpositive/insert_values_orig_table.q
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_ivot(
@@ -15,7 +14,7 @@ create table acid_ivot(
         ctimestamp1 TIMESTAMP,
         ctimestamp2 TIMESTAMP,
        cboolean1 BOOLEAN,
-        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc;
+        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 LOAD DATA LOCAL INPATH "../../data/files/alltypesorc" into table acid_ivot;
diff --git a/ql/src/test/queries/clientpositive/insert_values_partitioned.q b/ql/src/test/queries/clientpositive/insert_values_partitioned.q
index 23d6d4c..c8223f7 100644
--- a/ql/src/test/queries/clientpositive/insert_values_partitioned.q
+++ b/ql/src/test/queries/clientpositive/insert_values_partitioned.q
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_ivp(ti tinyint,
@@ -14,7 +13,7 @@ create table acid_ivp(ti tinyint,
                  dt date,
                  s string,
                  vc varchar(128),
-                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc;
+                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_ivp partition (ds='today') values
     (1, 257, 65537, 4294967297, 3.14, 3.141592654, 109.23, '2014-08-25 17:21:30.0', '2014-08-25', 'mary had a little lamb', 'ring around the rosie', 'red'),
diff --git a/ql/src/test/queries/clientpositive/insert_values_tmp_table.q b/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
index fd8ec29..4e4c39e 100644
--- a/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
+++ b/ql/src/test/queries/clientpositive/insert_values_tmp_table.q
@@ -1,12 +1,12 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc;
+create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_ivtt values
     (1, 109.23, 'mary had a little lamb'),
-    (429496729, 0.14, 'its fleece was white as snow');
+    (429496729, 0.14, 'its fleece was white as snow'),
+    (-29496729, -0.14, 'negative values test');
 
 select i, de, vc from acid_ivtt order by i;
diff --git a/ql/src/test/queries/clientpositive/join_merge_multi_expressions.q b/ql/src/test/queries/clientpositive/join_merge_multi_expressions.q
new file mode 100644
index 0000000..dcab313
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/join_merge_multi_expressions.q
@@ -0,0 +1,3 @@
+explain
+select count(*) from srcpart a join srcpart b on a.key = b.key and a.hr = b.hr join srcpart c on a.hr = c.hr and a.key = c.key;
+select count(*) from srcpart a join srcpart b on a.key = b.key and a.hr = b.hr join srcpart c on a.hr = c.hr and a.key = c.key;
diff --git a/ql/src/test/queries/clientpositive/optimize_nullscan.q b/ql/src/test/queries/clientpositive/optimize_nullscan.q
index d06b89b..61a71a2 100644
--- a/ql/src/test/queries/clientpositive/optimize_nullscan.q
+++ b/ql/src/test/queries/clientpositive/optimize_nullscan.q
@@ -5,6 +5,10 @@ select key from src where false;
 select key from src where false;
 
 explain extended
+select count(key) from srcpart where 1=2 group by key;
+select count(key) from srcpart where 1=2 group by key;
+
+explain extended
 select * from (select key from src where false) a left outer join (select key from srcpart limit 0) b on a.key=b.key;
 select * from (select key from src where false) a left outer join (select key from srcpart limit 0) b on a.key=b.key;
diff --git a/ql/src/test/queries/clientpositive/parquet_types.q b/ql/src/test/queries/clientpositive/parquet_types.q
index 86af5af..22585c3 100644
--- a/ql/src/test/queries/clientpositive/parquet_types.q
+++ b/ql/src/test/queries/clientpositive/parquet_types.q
@@ -10,9 +10,14 @@ CREATE TABLE parquet_types_staging (
   cstring1 string,
   t timestamp,
   cchar char(5),
-  cvarchar varchar(10)
+  cvarchar varchar(10),
+  m1 map,
+  l1 array,
+  st1 struct
 ) ROW FORMAT DELIMITED
-FIELDS TERMINATED BY '|';
+FIELDS TERMINATED BY '|'
+COLLECTION ITEMS TERMINATED BY ','
+MAP KEYS TERMINATED BY ':';
 
 CREATE TABLE parquet_types (
   cint int,
@@ -23,7 +28,10 @@ CREATE TABLE parquet_types (
   cstring1 string,
   t timestamp,
   cchar char(5),
-  cvarchar varchar(10)
+  cvarchar varchar(10),
+  m1 map,
+  l1 array,
+  st1 struct
 ) STORED AS PARQUET;
 
 LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging;
@@ -32,6 +40,8 @@ INSERT OVERWRITE TABLE parquet_types SELECT * FROM parquet_types_staging;
 
 SELECT * FROM parquet_types;
 
+SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types;
+
 SELECT ctinyint,
   MAX(cint),
   MIN(csmallint),
diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q
index 5501a65..de51cdd 100644
--- a/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q
+++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat2.q
@@ -1,4 +1,4 @@
-
+-- SORT_BEFORE_DIFF
 create table partition_test_partitioned(key string, value string) partitioned by (dt string);
diff --git a/ql/src/test/queries/clientpositive/quote2.q b/ql/src/test/queries/clientpositive/quote2.q
index c93902a..17cbfca 100644
--- a/ql/src/test/queries/clientpositive/quote2.q
+++ b/ql/src/test/queries/clientpositive/quote2.q
@@ -10,6 +10,7 @@ SELECT
     'abc\\\\\'',    "abc\\\\\"",
     'abc\\\\\\',    "abc\\\\\\",
     'abc""""\\',    "abc''''\\",
+    'mysql_%\\_\%', 'mysql\\\_\\\\\%',
    "awk '{print NR\"\\t\"$0}'",
     'tab\ttab',     "tab\ttab"
 FROM src
@@ -24,6 +25,7 @@ SELECT
     'abc\\\\\'',    "abc\\\\\"",
     'abc\\\\\\',    "abc\\\\\\",
     'abc""""\\',    "abc''''\\",
+    'mysql_%\\_\%', 'mysql\\\_\\\\\%',
    "awk '{print NR\"\\t\"$0}'",
     'tab\ttab',     "tab\ttab"
 FROM src
diff --git a/ql/src/test/queries/clientpositive/tez_smb_1.q b/ql/src/test/queries/clientpositive/tez_smb_1.q
new file mode 100644
index 0000000..b675eea
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/tez_smb_1.q
@@ -0,0 +1,38 @@
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ;
+
+CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting = true;
+set hive.optimize.bucketingsorting=false;
+insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part;
+
+CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin;
+
+set hive.convert.join.bucket.mapjoin.tez = true;
+set hive.auto.convert.sortmerge.join = true;
+
+set hive.auto.convert.join.noconditionaltask.size=500;
+
+explain
+select count(*) from tab s1 join tab s3 on s1.key=s3.key;
+
+select s1.key, s1.value, s3.value from tab s1 join tab s3 on s1.key=s3.key;
+select count(*) from tab s2;
+
diff --git a/ql/src/test/queries/clientpositive/tez_smb_main.q b/ql/src/test/queries/clientpositive/tez_smb_main.q
new file mode 100644
index 0000000..4f178f7
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/tez_smb_main.q
@@ -0,0 +1,84 @@
+explain
+select * from src a join src1 b on a.key = b.key;
+
+select * from src a join src1 b on a.key = b.key;
+
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+set hive.auto.convert.sortmerge.join.bigtable.selection.policy = org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ;
+
+CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting = true;
+set hive.optimize.bucketingsorting=false;
+insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part;
+
+CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin;
+
+set hive.convert.join.bucket.mapjoin.tez = true;
+set hive.auto.convert.sortmerge.join = true;
+
+explain
+select count(*)
+from tab a join tab_part b on a.key = b.key;
+
+select count(*)
+from tab a join tab_part b on a.key = b.key;
+
+set hive.auto.convert.join.noconditionaltask.size=2000;
+explain
+select count (*)
+from tab a join tab_part b on a.key = b.key;
+
+select count(*)
+from tab a join tab_part b on a.key = b.key;
+
+set hive.auto.convert.join.noconditionaltask.size=1000;
+explain
+select count (*)
+from tab a join tab_part b on a.key = b.key;
+
+select count(*)
+from tab a join tab_part b on a.key = b.key;
+
+set hive.auto.convert.join.noconditionaltask.size=500;
+explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value;
+select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value;
+
+explain select count(*) from tab a join tab_part b on a.value = b.value;
+select count(*) from tab a join tab_part b on a.value = b.value;
+
+explain
+select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+UNION ALL
+select s2.key as key, s2.value as value from tab s2
+) a join tab_part b on (a.key = b.key);
+
+set hive.auto.convert.join.noconditionaltask.size=10000;
+explain select count(*) from tab a join tab_part b on a.value = b.value;
+select count(*) from tab a join tab_part b on a.value = b.value;
+
+explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value;
+select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value;
+
+explain
+select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key
+UNION ALL
+select s2.key as key, s2.value as value from tab s2
+) a join tab_part b on (a.key = b.key);
+
diff --git a/ql/src/test/queries/clientpositive/tez_union_group_by.q b/ql/src/test/queries/clientpositive/tez_union_group_by.q
new file mode 100644
index 0000000..56e8583
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/tez_union_group_by.q
@@ -0,0 +1,87 @@
+CREATE TABLE x
+(
+u bigint,
+t string,
+st string
+)
+PARTITIONED BY (date string)
+STORED AS ORC
+TBLPROPERTIES ("orc.compress"="ZLIB");
+
+CREATE TABLE y
+(
+u bigint
+)
+PARTITIONED BY (date string)
+STORED AS ORC
+TBLPROPERTIES ("orc.compress"="ZLIB");
+
+CREATE TABLE z
+(
+u bigint
+)
+PARTITIONED BY (date string)
+STORED AS ORC
+TBLPROPERTIES ("orc.compress"="ZLIB");
+
+CREATE TABLE v
+(
+t string,
+st string,
+id int
+)
+STORED AS ORC
+TBLPROPERTIES ("orc.compress"="ZLIB");
+
+EXPLAIN
+SELECT o.u, n.u
+FROM
+(
+SELECT m.u, Min(date) as ft
+FROM
+(
+SELECT u, date FROM x WHERE date < '2014-09-02'
+UNION ALL
+SELECT u, date FROM y WHERE date < '2014-09-02'
+UNION ALL
+SELECT u, date FROM z WHERE date < '2014-09-02'
+) m
+GROUP BY m.u
+) n
+LEFT OUTER JOIN
+(
+SELECT x.u
+FROM x
+JOIN v
+ON (x.t = v.t AND x.st <=> v.st)
+WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03'
+GROUP BY x.u
+) o
+ON n.u = o.u
+WHERE n.u <> 0 AND n.ft <= '2014-09-02';
+
+SELECT o.u, n.u
+FROM
+(
+SELECT m.u, Min(date) as ft
+FROM
+(
+SELECT u, date FROM x WHERE date < '2014-09-02'
+UNION ALL
+SELECT u, date FROM y WHERE date < '2014-09-02'
+UNION ALL
+SELECT u, date FROM z WHERE date < '2014-09-02'
+) m
+GROUP BY m.u
+) n
+LEFT OUTER JOIN
+(
+SELECT x.u
+FROM x
+JOIN v
+ON (x.t = v.t AND x.st <=> v.st)
+WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03'
+GROUP BY x.u
+) o
+ON n.u = o.u
+WHERE n.u <> 0 AND n.ft <= '2014-09-02';
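The ACID-related edits that follow all make the same two-part change: drop the forced hive.input.format override and mark each table transactional explicitly via TBLPROPERTIES. A minimal sketch of the prerequisites for an updatable table, assuming the settings used throughout these tests (acid_demo is a hypothetical table name, not one from the patch):

  set hive.support.concurrency=true;
  set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
  set hive.enforce.bucketing=true;

  create table acid_demo(a int, b varchar(128))
  clustered by (a) into 2 buckets
  stored as orc TBLPROPERTIES ('transactional'='true');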
diff --git a/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q b/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
index 04d2df5..6ef209f 100644
--- a/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
+++ b/ql/src/test/queries/clientpositive/update_after_multiple_inserts.q
@@ -1,12 +1,11 @@
 set hive.exec.dynamic.partition.mode=nonstrict;
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uami(i int,
                  de decimal(5,2),
-                 vc varchar(128)) clustered by (i) into 2 buckets stored as orc;
+                 vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uami values
     (1, 109.23, 'mary had a little lamb'),
diff --git a/ql/src/test/queries/clientpositive/update_all_non_partitioned.q b/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
index 67d6ba9..3c01825 100644
--- a/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_all_non_partitioned.q
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uanp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint < 0 order by cint limit 10;
diff --git a/ql/src/test/queries/clientpositive/update_all_partitioned.q b/ql/src/test/queries/clientpositive/update_all_partitioned.q
index 0b6c767..b407985 100644
--- a/ql/src/test/queries/clientpositive/update_all_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_all_partitioned.q
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
+create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uap partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10;
 insert into table acid_uap partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 10 order by cint limit 10;
diff --git a/ql/src/test/queries/clientpositive/update_all_types.q b/ql/src/test/queries/clientpositive/update_all_types.q
index 39fe73d..14ffc59 100644
--- a/ql/src/test/queries/clientpositive/update_all_types.q
+++ b/ql/src/test/queries/clientpositive/update_all_types.q
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 create table acid_uat(ti tinyint,
@@ -15,7 +14,7 @@ create table acid_uat(ti tinyint,
                  s string,
                  vc varchar(128),
                  ch char(36),
-                 b boolean) clustered by (i) into 2 buckets stored as orc;
+                 b boolean) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uat
     select ctinyint,
@@ -53,4 +52,11 @@ update acid_uat set
 
 select * from acid_uat order by i;
 
+update acid_uat set
+  ti = ti * 2,
+  si = cast(f as int),
+  d = floor(de)
+  where s = 'aw724t8c5558x2xneC624';
+
+select * from acid_uat order by i;
diff --git a/ql/src/test/queries/clientpositive/update_orig_table.q b/ql/src/test/queries/clientpositive/update_orig_table.q
index f09ad32..27b4a95 100644
--- a/ql/src/test/queries/clientpositive/update_orig_table.q
+++ b/ql/src/test/queries/clientpositive/update_orig_table.q
@@ -1,6 +1,5 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
 dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/update_orig_table;
@@ -18,7 +17,7 @@ create table acid_uot(
         ctimestamp1 TIMESTAMP,
         ctimestamp2 TIMESTAMP,
         cboolean1 BOOLEAN,
-        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/update_orig_table';
+        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc location '${system:test.tmp.dir}/update_orig_table' TBLPROPERTIES ('transactional'='true');
 
 update acid_uot set cstring1 = 'fred' where cint < -1070551679;
diff --git a/ql/src/test/queries/clientpositive/update_tmp_table.q b/ql/src/test/queries/clientpositive/update_tmp_table.q
index c863cd6..281357f 100644
--- a/ql/src/test/queries/clientpositive/update_tmp_table.q
+++ b/ql/src/test/queries/clientpositive/update_tmp_table.q
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc;
+create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_utt select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
diff --git a/ql/src/test/queries/clientpositive/update_two_cols.q b/ql/src/test/queries/clientpositive/update_two_cols.q
index 3233d2f..b1972e5 100644
--- a/ql/src/test/queries/clientpositive/update_two_cols.q
+++ b/ql/src/test/queries/clientpositive/update_two_cols.q
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc;
+create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_utc select cint, cast(cstring1 as varchar(128)), cfloat from alltypesorc where cint < 0 order by cint limit 10;
diff --git a/ql/src/test/queries/clientpositive/update_where_no_match.q b/ql/src/test/queries/clientpositive/update_where_no_match.q
index 00583c3..d578862 100644
--- a/ql/src/test/queries/clientpositive/update_where_no_match.q
+++ b/ql/src/test/queries/clientpositive/update_where_no_match.q
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_wnm select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
diff --git a/ql/src/test/queries/clientpositive/update_where_non_partitioned.q b/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
index 378cf94..06c688f 100644
--- a/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_where_non_partitioned.q
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc;
+create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uwnp select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10;
diff --git a/ql/src/test/queries/clientpositive/update_where_partitioned.q b/ql/src/test/queries/clientpositive/update_where_partitioned.q
index c5b6d04..858cebb 100644
--- a/ql/src/test/queries/clientpositive/update_where_partitioned.q
+++ b/ql/src/test/queries/clientpositive/update_where_partitioned.q
@@ -1,9 +1,8 @@
 set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
-set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
 set hive.enforce.bucketing=true;
 
-create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc;
+create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');
 
 insert into table acid_uwp partition (ds='today') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint < 0 order by cint limit 10;
 insert into table acid_uwp partition (ds='tomorrow') select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null and cint > 100 order by cint limit 10;
diff --git a/ql/src/test/queries/clientpositive/vector_char_4.q b/ql/src/test/queries/clientpositive/vector_char_4.q
new file mode 100644
index 0000000..c824456
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_char_4.q
@@ -0,0 +1,51 @@
+SET hive.vectorized.execution.enabled=true;
+
+drop table if exists vectortab2k;
+drop table if exists vectortab2korc;
+
+create table vectortab2k(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  dc decimal(38,18),
+  bo boolean,
+  s string,
+  s2 string,
+  ts timestamp,
+  ts2 timestamp,
+  dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k;
+
+create table vectortab2korc(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  dc decimal(38,18),
+  bo boolean,
+  s string,
+  s2 string,
+  ts timestamp,
+  ts2 timestamp,
+  dt date)
+STORED AS ORC;
+
+INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
+
+drop table if exists char_lazy_binary_columnar;
+create table char_lazy_binary_columnar(ct char(10), csi char(10), ci char(20), cb char(30), cf char(20), cd char(20), cs char(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile;
+
+explain
+insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc;
+
+-- insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc;
+
+-- select count(*) as cnt from char_lazy_binary_columnar group by cs order by cnt asc;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/vector_char_simple.q b/ql/src/test/queries/clientpositive/vector_char_simple.q
index ec46630..858fe16 100644
--- a/ql/src/test/queries/clientpositive/vector_char_simple.q
+++ b/ql/src/test/queries/clientpositive/vector_char_simple.q
@@ -41,3 +41,16 @@ order by key desc
 limit 5;
 
 drop table char_2;
+
+
+-- Implicit conversion. Occurs in reduce-side under Tez.
+create table char_3 (
+  field char(12)
+) stored as orc;
+
+explain
+insert into table char_3 select cint from alltypesorc limit 10;
+
+insert into table char_3 select cint from alltypesorc limit 10;
+
+drop table char_3;
diff --git a/ql/src/test/queries/clientpositive/vector_count_distinct.q b/ql/src/test/queries/clientpositive/vector_count_distinct.q
new file mode 100644
index 0000000..c1aae08
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_count_distinct.q
@@ -0,0 +1,108 @@
+SET hive.vectorized.execution.enabled=true;
+
+create table web_sales_txt
+(
+    ws_sold_date_sk int,
+    ws_sold_time_sk int,
+    ws_ship_date_sk int,
+    ws_item_sk int,
+    ws_bill_customer_sk int,
+    ws_bill_cdemo_sk int,
+    ws_bill_hdemo_sk int,
+    ws_bill_addr_sk int,
+    ws_ship_customer_sk int,
+    ws_ship_cdemo_sk int,
+    ws_ship_hdemo_sk int,
+    ws_ship_addr_sk int,
+    ws_web_page_sk int,
+    ws_web_site_sk int,
+    ws_ship_mode_sk int,
+    ws_warehouse_sk int,
+    ws_promo_sk int,
+    ws_order_number int,
+    ws_quantity int,
+    ws_wholesale_cost decimal(7,2),
+    ws_list_price decimal(7,2),
+    ws_sales_price decimal(7,2),
+    ws_ext_discount_amt decimal(7,2),
+    ws_ext_sales_price decimal(7,2),
+    ws_ext_wholesale_cost decimal(7,2),
+    ws_ext_list_price decimal(7,2),
+    ws_ext_tax decimal(7,2),
+    ws_coupon_amt decimal(7,2),
+    ws_ext_ship_cost decimal(7,2),
+    ws_net_paid decimal(7,2),
+    ws_net_paid_inc_tax decimal(7,2),
+    ws_net_paid_inc_ship decimal(7,2),
+    ws_net_paid_inc_ship_tax decimal(7,2),
+    ws_net_profit decimal(7,2)
+)
+row format delimited fields terminated by '|'
+stored as textfile;
+
+LOAD DATA LOCAL INPATH '../../data/files/web_sales_2k' OVERWRITE INTO TABLE web_sales_txt;
+
+------------------------------------------------------------------------------------------
+
+create table web_sales
+(
+    ws_sold_date_sk int,
+    ws_sold_time_sk int,
+    ws_ship_date_sk int,
+    ws_item_sk int,
+    ws_bill_customer_sk int,
+    ws_bill_cdemo_sk int,
+    ws_bill_hdemo_sk int,
+    ws_bill_addr_sk int,
+    ws_ship_customer_sk int,
+    ws_ship_cdemo_sk int,
+    ws_ship_hdemo_sk int,
+    ws_ship_addr_sk int,
+    ws_web_page_sk int,
+    ws_ship_mode_sk int,
+    ws_warehouse_sk int,
+    ws_promo_sk int,
+    ws_order_number int,
+    ws_quantity int,
+    ws_wholesale_cost decimal(7,2),
+    ws_list_price decimal(7,2),
+    ws_sales_price decimal(7,2),
+    ws_ext_discount_amt decimal(7,2),
+    ws_ext_sales_price decimal(7,2),
+    ws_ext_wholesale_cost decimal(7,2),
+    ws_ext_list_price decimal(7,2),
+    ws_ext_tax decimal(7,2),
+    ws_coupon_amt decimal(7,2),
+    ws_ext_ship_cost decimal(7,2),
+    ws_net_paid decimal(7,2),
+    ws_net_paid_inc_tax decimal(7,2),
+    ws_net_paid_inc_ship decimal(7,2),
+    ws_net_paid_inc_ship_tax decimal(7,2),
+    ws_net_profit decimal(7,2)
+)
+partitioned by
+(
+    ws_web_site_sk int
+)
+stored as orc
+tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384");
+
+set hive.exec.dynamic.partition.mode=nonstrict;
+
+insert overwrite table web_sales
+partition (ws_web_site_sk)
+select ws_sold_date_sk, ws_sold_time_sk, ws_ship_date_sk, ws_item_sk,
+    ws_bill_customer_sk, ws_bill_cdemo_sk, ws_bill_hdemo_sk, ws_bill_addr_sk,
+    ws_ship_customer_sk, ws_ship_cdemo_sk, ws_ship_hdemo_sk, ws_ship_addr_sk,
+    ws_web_page_sk, ws_ship_mode_sk, ws_warehouse_sk, ws_promo_sk, ws_order_number,
+    ws_quantity, ws_wholesale_cost, ws_list_price, ws_sales_price, ws_ext_discount_amt,
+    ws_ext_sales_price, ws_ext_wholesale_cost, ws_ext_list_price, ws_ext_tax,
+    ws_coupon_amt, ws_ext_ship_cost, ws_net_paid, ws_net_paid_inc_tax, ws_net_paid_inc_ship,
+    ws_net_paid_inc_ship_tax, ws_net_profit, ws_web_site_sk from web_sales_txt;
+
+------------------------------------------------------------------------------------------
+
+explain
+select count(distinct ws_order_number) from web_sales;
+
+select count(distinct ws_order_number) from web_sales;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/vector_distinct_2.q b/ql/src/test/queries/clientpositive/vector_distinct_2.q
new file mode 100644
index 0000000..399936b
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_distinct_2.q
@@ -0,0 +1,43 @@
+SET hive.vectorized.execution.enabled=true;
+
+create table vectortab2k(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  dc decimal(38,18),
+  bo boolean,
+  s string,
+  s2 string,
+  ts timestamp,
+  ts2 timestamp,
+  dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k;
+
+create table vectortab2korc(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  dc decimal(38,18),
+  bo boolean,
+  s string,
+  s2 string,
+  ts timestamp,
+  ts2 timestamp,
+  dt date)
+STORED AS ORC;
+
+INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
+
+explain
+select distinct s, t from vectortab2korc;
+
+select distinct s, t from vectortab2korc;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/vector_groupby_3.q b/ql/src/test/queries/clientpositive/vector_groupby_3.q
new file mode 100644
index 0000000..d66b568
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_groupby_3.q
@@ -0,0 +1,43 @@
+SET hive.vectorized.execution.enabled=true;
+
+create table vectortab2k(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  dc decimal(38,18),
+  bo boolean,
+  s string,
+  s2 string,
+  ts timestamp,
+  ts2 timestamp,
+  dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k;
+
+create table vectortab2korc(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  dc decimal(38,18),
+  bo boolean,
+  s string,
+  s2 string,
+  ts timestamp,
+  ts2 timestamp,
+  dt date)
+STORED AS ORC;
+
+INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
+
+explain
+select s, t, max(b) from vectortab2korc group by s, t;
+
+select s, t, max(b) from vectortab2korc group by s, t;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/vector_orderby_5.q b/ql/src/test/queries/clientpositive/vector_orderby_5.q
new file mode 100644
index 0000000..571ecc9
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_orderby_5.q
@@ -0,0 +1,43 @@
+SET hive.vectorized.execution.enabled=true;
+
+create table vectortab2k(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  dc decimal(38,18),
+  bo boolean,
+  s string,
+  s2 string,
+  ts timestamp,
+  ts2 timestamp,
+  dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k;
+
+create table vectortab2korc(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  dc decimal(38,18),
+  bo boolean,
+  s string,
+  s2 string,
+  ts timestamp,
+  ts2 timestamp,
+  dt date)
+STORED AS ORC;
+
+INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
+
+explain
+select bo, max(b) from vectortab2korc group by bo order by bo desc;
+
+select bo, max(b) from vectortab2korc group by bo order by bo desc;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/vector_varchar_4.q b/ql/src/test/queries/clientpositive/vector_varchar_4.q
new file mode 100644
index 0000000..c1e9c67
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vector_varchar_4.q
@@ -0,0 +1,51 @@
+SET hive.vectorized.execution.enabled=true;
+
+drop table if exists vectortab2k;
+drop table if exists vectortab2korc;
+
+create table vectortab2k(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  dc decimal(38,18),
+  bo boolean,
+  s string,
+  s2 string,
+  ts timestamp,
+  ts2 timestamp,
+  dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k;
+
+create table vectortab2korc(
+  t tinyint,
+  si smallint,
+  i int,
+  b bigint,
+  f float,
+  d double,
+  dc decimal(38,18),
+  bo boolean,
+  s string,
+  s2 string,
+  ts timestamp,
+  ts2 timestamp,
+  dt date)
+STORED AS ORC;
+
+INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k;
+
+drop table if exists varchar_lazy_binary_columnar;
+create table varchar_lazy_binary_columnar(vt varchar(10), vsi varchar(10), vi varchar(20), vb varchar(30), vf varchar(20),vd varchar(20),vs varchar(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile;
+
+explain
+insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc;
+
+-- insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc;
+
+-- select count(*) as cnt from varchar_lazy_binary_columnar group by vs order by cnt asc;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/vector_varchar_simple.q b/ql/src/test/queries/clientpositive/vector_varchar_simple.q
index 68d6b09..1cd30ee 100644
--- a/ql/src/test/queries/clientpositive/vector_varchar_simple.q
+++ b/ql/src/test/queries/clientpositive/vector_varchar_simple.q
@@ -1,12 +1,12 @@
 SET hive.vectorized.execution.enabled=true;
-drop table char_2;
+drop table varchar_2;
 
-create table char_2 (
+create table varchar_2 (
   key varchar(10),
   value varchar(20)
 ) stored as orc;
 
-insert overwrite table char_2 select * from src;
+insert overwrite table varchar_2 select * from src;
 
 select key, value
 from src
@@ -14,13 +14,13 @@ order by key asc
 limit 5;
 
 explain select key, value
-from char_2
+from varchar_2
 order by key asc
 limit 5;
 
 -- should match the query from src
 select key, value
-from char_2
+from varchar_2
 order by key asc
 limit 5;
@@ -30,14 +30,26 @@ order by key desc
 limit 5;
 
 explain select key, value
-from char_2
+from varchar_2
 order by key desc
 limit 5;
 
 -- should match the query from src
 select key, value
-from char_2
+from varchar_2
 order by key desc
 limit 5;
 
-drop table char_2;
+drop table varchar_2;
+
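The test added just below relies on Hive inserting an implicit cast when the selected type (int) does not match the target column type (varchar), which under Tez happens on the reduce side. For comparison, a sketch of the explicit equivalent, assuming the same varchar_3 table the test defines:

  insert into table varchar_3 select cast(cint as varchar(25)) from alltypesorc limit 10;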
+-- Implicit conversion. Occurs in reduce-side under Tez.
+create table varchar_3 (
+  field varchar(25)
+) stored as orc;
+
+explain
+insert into table varchar_3 select cint from alltypesorc limit 10;
+
+insert into table varchar_3 select cint from alltypesorc limit 10;
+
+drop table varchar_3;
diff --git a/ql/src/test/queries/clientpositive/vectorization_0.q b/ql/src/test/queries/clientpositive/vectorization_0.q
index 39fba7d..b3cd794 100644
--- a/ql/src/test/queries/clientpositive/vectorization_0.q
+++ b/ql/src/test/queries/clientpositive/vectorization_0.q
@@ -1,4 +1,180 @@
 SET hive.vectorized.execution.enabled=true;
+
+-- Use ORDER BY clauses to generate 2 stages.
+EXPLAIN
+SELECT MIN(ctinyint) as c1,
+       MAX(ctinyint),
+       COUNT(ctinyint),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT MIN(ctinyint) as c1,
+       MAX(ctinyint),
+       COUNT(ctinyint),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT SUM(ctinyint) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT SUM(ctinyint) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT
+  avg(ctinyint) as c1,
+  variance(ctinyint),
+  var_pop(ctinyint),
+  var_samp(ctinyint),
+  std(ctinyint),
+  stddev(ctinyint),
+  stddev_pop(ctinyint),
+  stddev_samp(ctinyint)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT
+  avg(ctinyint) as c1,
+  variance(ctinyint),
+  var_pop(ctinyint),
+  var_samp(ctinyint),
+  std(ctinyint),
+  stddev(ctinyint),
+  stddev_pop(ctinyint),
+  stddev_samp(ctinyint)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT MIN(cbigint) as c1,
+       MAX(cbigint),
+       COUNT(cbigint),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT MIN(cbigint) as c1,
+       MAX(cbigint),
+       COUNT(cbigint),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT SUM(cbigint) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT SUM(cbigint) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT
+  avg(cbigint) as c1,
+  variance(cbigint),
+  var_pop(cbigint),
+  var_samp(cbigint),
+  std(cbigint),
+  stddev(cbigint),
+  stddev_pop(cbigint),
+  stddev_samp(cbigint)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT
+  avg(cbigint) as c1,
+  variance(cbigint),
+  var_pop(cbigint),
+  var_samp(cbigint),
+  std(cbigint),
+  stddev(cbigint),
+  stddev_pop(cbigint),
+  stddev_samp(cbigint)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT MIN(cfloat) as c1,
+       MAX(cfloat),
+       COUNT(cfloat),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT MIN(cfloat) as c1,
+       MAX(cfloat),
+       COUNT(cfloat),
+       COUNT(*)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT SUM(cfloat) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT SUM(cfloat) as c1
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT
+  avg(cfloat) as c1,
+  variance(cfloat),
+  var_pop(cfloat),
+  var_samp(cfloat),
+  std(cfloat),
+  stddev(cfloat),
+  stddev_pop(cfloat),
+  stddev_samp(cfloat)
+FROM alltypesorc
+ORDER BY c1;
+
+SELECT
+  avg(cfloat) as c1,
+  variance(cfloat),
+  var_pop(cfloat),
+  var_samp(cfloat),
+  std(cfloat),
+  stddev(cfloat),
+  stddev_pop(cfloat),
+  stddev_samp(cfloat)
+FROM alltypesorc
+ORDER BY c1;
+
+EXPLAIN
+SELECT AVG(cbigint),
+       (-(AVG(cbigint))),
+       (-6432 + AVG(cbigint)),
+       STDDEV_POP(cbigint),
+       (-((-6432 + AVG(cbigint)))),
+       ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))),
+       VAR_SAMP(cbigint),
+       (-((-6432 + AVG(cbigint)))),
+       (-6432 + (-((-6432 + AVG(cbigint))))),
+       (-((-6432 + AVG(cbigint)))),
+       ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))),
+       COUNT(*),
+       SUM(cfloat),
+       (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)),
+       (-(VAR_SAMP(cbigint))),
+       ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))),
+       MIN(ctinyint),
+       (-(MIN(ctinyint)))
+FROM alltypesorc
+WHERE (((cstring2 LIKE '%b%')
+        OR ((79.553 != cint)
+            OR (cbigint < cdouble)))
+       OR ((ctinyint >= csmallint)
+           AND ((cboolean2 = 1)
+                AND (3569 = ctinyint))));
+
 SELECT AVG(cbigint),
        (-(AVG(cbigint))),
        (-6432 + AVG(cbigint)),
diff --git a/ql/src/test/queries/clientpositive/vectorized_date_funcs.q b/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
index 6392fc9..1fb0dac 100644
--- a/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
+++ b/ql/src/test/queries/clientpositive/vectorized_date_funcs.q
@@ -122,4 +122,20 @@ SELECT
 FROM date_udf_flight_orc LIMIT 10;
 
 -- Test extracting the date part of expression that includes time
-SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1;
\ No newline at end of file
+SELECT to_date('2009-07-30 04:17:52') FROM date_udf_flight_orc LIMIT 1;
+
+EXPLAIN SELECT
+  min(fl_date) AS c1,
+  max(fl_date),
+  count(fl_date),
+  count(*)
+FROM date_udf_flight_orc
+ORDER BY c1;
+
+SELECT
+  min(fl_date) AS c1,
+  max(fl_date),
+  count(fl_date),
+  count(*)
+FROM date_udf_flight_orc
+ORDER BY c1;
\ No newline at end of file
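One detail worth noting about the aggregate tests above: each query carries an ORDER BY on the aggregate alias purely to force a second (reduce) stage, per the comment at the top of the vectorization_0.q hunk, so both the map-side and reduce-side vectorized aggregation paths get covered. A minimal sketch of that pattern:

  EXPLAIN
  SELECT MIN(ctinyint) AS c1
  FROM alltypesorc
  ORDER BY c1;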
diff --git a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
new file mode 100644
index 0000000..1197f7d
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
@@ -0,0 +1,192 @@
+set hive.optimize.ppd=true;
+set hive.ppd.remove.duplicatefilters=true;
+set hive.tez.dynamic.partition.pruning=true;
+set hive.optimize.metadataonly=false;
+set hive.optimize.index.filter=true;
+set hive.vectorized.execution.enabled=true;
+
+
+select distinct ds from srcpart;
+select distinct hr from srcpart;
+
+EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds;
+create table srcpart_date stored as orc as select ds as ds, ds as date from srcpart group by ds;
+create table srcpart_hour stored as orc as select hr as hr, hr as hour from srcpart group by hr;
+create table srcpart_date_hour stored as orc as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr;
+create table srcpart_double_hour stored as orc as select (hr*2) as hr, hr as hour from srcpart group by hr;
+
+-- single column, single key
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+set hive.tez.dynamic.partition.pruning=false;
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+set hive.tez.dynamic.partition.pruning=true;
+select count(*) from srcpart where ds = '2008-04-08';
+
+-- multiple sources, single key
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+set hive.tez.dynamic.partition.pruning=false;
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+set hive.tez.dynamic.partition.pruning=true;
+select count(*) from srcpart where hr = 11 and ds = '2008-04-08';
+
+-- multiple columns single source
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
+select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
+set hive.tez.dynamic.partition.pruning=false;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
+select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
+set hive.tez.dynamic.partition.pruning=true;
+select count(*) from srcpart where ds = '2008-04-08' and hr = 11;
+
+-- empty set
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+set hive.tez.dynamic.partition.pruning=false;
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+set hive.tez.dynamic.partition.pruning=true;
+select count(*) from srcpart where ds = 'I DONT EXIST';
+
+-- expressions
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
+select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+set hive.tez.dynamic.partition.pruning=false;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
+select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11;
+set hive.tez.dynamic.partition.pruning=true;
+select count(*) from srcpart where hr = 11;
+EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11;
+select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11;
+set hive.tez.dynamic.partition.pruning=true;
+select count(*) from srcpart where cast(hr as string) = 11;
+
+
+-- parent is reduce tasks
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08';
+select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08';
+select count(*) from srcpart where ds = '2008-04-08';
+
+-- non-equi join
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
+select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
+
+-- old style join syntax
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
+select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
+
+-- left join
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+
+-- full outer
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+
+-- with static pruning
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
+where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+
+-- union + subquery
+EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
+select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
+EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
+select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
+EXPLAIN select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
+select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
+
union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); +select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); + +set hive.auto.convert.join=true; +set hive.auto.convert.join.noconditionaltask = true; +set hive.auto.convert.join.noconditionaltask.size = 10000000; + +-- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +select count(*) from srcpart where ds = '2008-04-08'; + +-- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +select count(*) from srcpart where hr = 11 and ds = '2008-04-08'; + +-- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +select count(*) from srcpart where ds = '2008-04-08' and hr = 11; + +-- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; +-- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; + +-- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; +select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11; +select count(*) from srcpart where hr = 11; + +-- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'; +select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'; +select count(*) from srcpart where ds = '2008-04-08'; + +-- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; + +-- full outer +EXPLAIN select count(*) 
from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; + +-- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; +-- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +-- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; + +-- union + subquery +EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); +select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); + + +-- different file format +create table srcpart_orc (key int, value string) partitioned by (ds string, hr int) stored as orc; + + +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.vectorized.execution.enabled=false; +set hive.exec.max.dynamic.partitions=1000; + +insert into table srcpart_orc partition (ds, hr) select key, value, ds, hr from srcpart; +EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09'); +select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09'); +select count(*) from srcpart where (ds = '2008-04-08' or ds = '2008-04-09') and hr = 11; + +drop table srcpart_orc; +drop table srcpart_date; +drop table srcpart_hour; +drop table srcpart_date_hour; +drop table srcpart_double_hour; diff --git a/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q b/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q index 95eedd3..8a2d5aa 100644 --- a/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q +++ b/ql/src/test/queries/clientpositive/vectorized_timestamp_funcs.q @@ -1,6 +1,7 @@ -SET hive.vectorized.execution.enabled = true; - -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. +-- Turning on vectorization has been temporarily moved after filling the test table +-- due to bug HIVE-8197. 
+ CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC; @@ -11,6 +12,8 @@ SELECT FROM alltypesorc LIMIT 40; +SET hive.vectorized.execution.enabled = true; + CREATE TABLE alltypesorc_wrong(stimestamp1 string) STORED AS ORC; INSERT INTO TABLE alltypesorc_wrong SELECT 'abcd' FROM alltypesorc LIMIT 1; @@ -122,3 +125,48 @@ SELECT second(stimestamp1) FROM alltypesorc_wrong ORDER BY c1; + +EXPLAIN SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string; + +SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string; + +-- SUM of timestamps is not vectorized reduce-side because it produces a double instead of a long (HIVE-8211)... +EXPLAIN SELECT + sum(ctimestamp1) +FROM alltypesorc_string; + +SELECT + sum(ctimestamp1) +FROM alltypesorc_string; + +EXPLAIN SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string; + +SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string; \ No newline at end of file diff --git a/ql/src/test/queries/positive/udf6.q b/ql/src/test/queries/positive/udf6.q index 65791c4..fc7f99c 100644 --- a/ql/src/test/queries/positive/udf6.q +++ b/ql/src/test/queries/positive/udf6.q @@ -1 +1 @@ -FROM src SELECT CONCAT('a', 'b'), IF(TRUE, 1 ,2) +FROM src SELECT CONCAT('a', 'b'), IF(TRUE, 1 ,2) + key diff --git a/ql/src/test/resources/orc-file-dump-dictionary-threshold.out b/ql/src/test/resources/orc-file-dump-dictionary-threshold.out index 965d283..380f0e0 100644 --- a/ql/src/test/resources/orc-file-dump-dictionary-threshold.out +++ b/ql/src/test/resources/orc-file-dump-dictionary-threshold.out @@ -38,7 +38,7 @@ File Statistics: Column 3: count: 21000 min: Darkness,-230 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 sum: 6910238 Stripes: - Stripe: offset: 3 data: 151109 rows: 5000 tail: 68 index: 704 + Stripe: offset: 3 data: 151108 rows: 5000 tail: 68 index: 704 Stream: column 0 section ROW_INDEX start: 3 length 15 Stream: column 1 section ROW_INDEX start: 18 length 156 Stream: column 2 section ROW_INDEX start: 174 length 172 @@ -46,7 +46,7 @@ Stripes: Stream: column 1 section DATA start: 707 length 20029 Stream: column 2 section DATA start: 20736 length 40035 Stream: column 3 section DATA start: 60771 length 86757 - Stream: column 3 section LENGTH start: 147528 length 4288 + Stream: column 3 section LENGTH start: 147528 length 4287 Encoding column 0: DIRECT Encoding column 1: DIRECT_V2 Encoding column 2: DIRECT_V2 @@ -65,19 +65,19 @@ Stripes: Entry 4:
count: 1000 min: -9216505819108477308 max: 9196474183833079923 positions: 20006,8686,416 Row group index column 3: Entry 0: count: 1000 min: Darkness,-230 max: worst-54-290-346-648-908-996 positions: 0,0,0,0,0 - Entry 1: count: 1000 min: Darkness,-230-368-488-586-862-930-1686 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966 positions: 2777,8442,0,696,18 - Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660 positions: 13595,4780,0,1555,14 - Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788 positions: 31432,228,0,2373,90 - Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744 positions: 54111,5096,0,3355,108 - Stripe: offset: 151884 data: 336358 rows: 5000 tail: 69 index: 941 - Stream: column 0 section ROW_INDEX start: 151884 length 15 - Stream: column 1 section ROW_INDEX start: 151899 length 150 - Stream: column 2 section ROW_INDEX start: 152049 length 167 - Stream: column 3 section ROW_INDEX start: 152216 length 609 - Stream: column 1 section DATA start: 152825 length 20029 - Stream: column 2 section DATA start: 172854 length 40035 - Stream: column 3 section DATA start: 212889 length 270789 - Stream: column 3 section LENGTH start: 483678 length 5505 + Entry 1: count: 1000 min: Darkness,-230-368-488-586-862-930-1686 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966 positions: 2777,8442,0,695,18 + Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660 positions: 13595,4780,0,1554,14 + Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788 positions: 31432,228,0,2372,90 + Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744 positions: 54111,5096,0,3354,108 + Stripe: offset: 151883 data: 336358 rows: 5000 tail: 69 index: 941 + Stream: column 0 section ROW_INDEX start: 151883 length 15 + Stream: column 1 section ROW_INDEX start: 151898 length 150 + Stream: column 2 section ROW_INDEX start: 152048 length 167 + Stream: column 3 section ROW_INDEX start: 152215 length 609 + Stream: column 1 section DATA start: 152824 length 20029 + Stream: column 2 section DATA start: 172853 length 40035 + Stream: column 3 section DATA start: 212888 length 270789 + Stream: column 3 section LENGTH start: 483677 length 5505 Encoding column 0: DIRECT Encoding column 1: DIRECT_V2 Encoding column 2: DIRECT_V2 @@ -100,15 +100,15 @@ Stripes: Entry 2: count: 1000 min: 
Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988 positions: 80822,9756,0,1945,222 Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984 positions: 137149,4496,0,3268,48 Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938 positions: 197972,6590,0,4064,342 - Stripe: offset: 489252 data: 558031 rows: 5000 tail: 69 index: 1169 - Stream: column 0 section ROW_INDEX start: 489252 length 15 - Stream: column 1 section ROW_INDEX start: 489267 length 159 - Stream: column 2 section ROW_INDEX start: 489426 length 169 - Stream: column 3 section ROW_INDEX start: 489595 length 826 - Stream: column 1 section DATA start: 490421 length 20029 - Stream: column 2 section DATA start: 510450 length 40035 - Stream: column 3 section DATA start: 550485 length 492258 - Stream: column 3 section LENGTH start: 1042743 length 5709 + Stripe: offset: 489251 data: 558031 rows: 5000 tail: 69 index: 1169 + Stream: column 0 section ROW_INDEX start: 489251 length 15 + Stream: column 1 section ROW_INDEX start: 489266 length 159 + Stream: column 2 section ROW_INDEX start: 489425 length 169 + Stream: column 3 section ROW_INDEX start: 489594 length 826 + Stream: column 1 section DATA start: 490420 length 20029 + Stream: column 2 section DATA start: 510449 length 40035 + Stream: column 3 section DATA start: 550484 length 492258 + Stream: column 3 section LENGTH start: 1042742 length 5709 Encoding column 0: DIRECT Encoding column 1: DIRECT_V2 Encoding column 2: DIRECT_V2 @@ -131,15 +131,15 @@ Stripes: Entry 2: count: 1000 min: 
Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976 positions: 170641,3422,0,2077,162 Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766 positions: 268420,9960,0,3369,16 Entry 4: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974 positions: 377916,1620,0,4041,470 - Stripe: offset: 1048521 data: 792850 rows: 5000 tail: 69 index: 1351 - Stream: column 0 section ROW_INDEX start: 1048521 length 15 - Stream: column 1 section ROW_INDEX start: 1048536 length 149 - Stream: column 2 section ROW_INDEX start: 1048685 length 170 - Stream: column 3 section ROW_INDEX start: 1048855 length 1017 - Stream: column 1 section DATA start: 1049872 length 20029 - Stream: column 2 section DATA start: 1069901 length 40035 - Stream: column 3 
section DATA start: 1109936 length 727038 - Stream: column 3 section LENGTH start: 1836974 length 5748 + Stripe: offset: 1048520 data: 792850 rows: 5000 tail: 69 index: 1351 + Stream: column 0 section ROW_INDEX start: 1048520 length 15 + Stream: column 1 section ROW_INDEX start: 1048535 length 149 + Stream: column 2 section ROW_INDEX start: 1048684 length 170 + Stream: column 3 section ROW_INDEX start: 1048854 length 1017 + Stream: column 1 section DATA start: 1049871 length 20029 + Stream: column 2 section DATA start: 1069900 length 40035 + Stream: column 3 section DATA start: 1109935 length 727038 + Stream: column 3 section LENGTH start: 1836973 length 5748 Encoding column 0: DIRECT Encoding column 1: DIRECT_V2 Encoding column 2: DIRECT_V2 @@ -162,15 +162,15 @@ Stripes: Entry 2: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878 positions: 263111,206,0,1926,462 Entry 3: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788 positions: 407371,8480,0,3444,250 Entry 4: count: 1000 min: 
Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904 positions: 562094,3058,0,4643,292 - Stripe: offset: 1842791 data: 188033 rows: 1000 tail: 67 index: 832 - Stream: column 0 section ROW_INDEX start: 1842791 length 10 - Stream: column 1 section ROW_INDEX start: 1842801 length 36 - Stream: column 2 section ROW_INDEX start: 1842837 length 39 - Stream: column 3 section ROW_INDEX start: 1842876 length 747 - Stream: column 1 section DATA start: 1843623 length 4007 - Stream: column 2 section DATA start: 1847630 length 8007 - Stream: column 3 section DATA start: 1855637 length 174759 - Stream: column 3 section LENGTH start: 2030396 length 1260 + Stripe: offset: 1842790 data: 188033 rows: 1000 tail: 67 index: 832 + Stream: column 0 section ROW_INDEX start: 1842790 length 10 + Stream: column 1 section ROW_INDEX start: 1842800 length 36 + Stream: column 2 section ROW_INDEX start: 1842836 length 39 + Stream: column 3 section ROW_INDEX start: 1842875 length 747 + Stream: column 1 section DATA start: 1843622 length 4007 + Stream: column 2 section DATA start: 1847629 length 8007 + Stream: column 3 section DATA start: 1855636 length 174759 + Stream: column 3 section LENGTH start: 2030395 length 1260 Encoding column 0: DIRECT Encoding column 1: DIRECT_V2 Encoding column 2: DIRECT_V2 @@ -182,6 +182,6 @@ Stripes: Row group index column 3: Entry 0: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164-19348-19400-19546-19776-19896-20084 max: 
worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 positions: 0,0,0,0,0 -File length: 2033559 bytes +File length: 2033557 bytes Padding length: 0 bytes Padding ratio: 0% diff --git a/ql/src/test/resources/orc-file-dump.out b/ql/src/test/resources/orc-file-dump.out index 40dfc11..d67b53a 100644 --- a/ql/src/test/resources/orc-file-dump.out +++ b/ql/src/test/resources/orc-file-dump.out @@ -38,16 +38,16 @@ File Statistics: Column 3: count: 21000 min: Darkness, max: worst sum: 81761 Stripes: - Stripe: offset: 3 data: 63766 rows: 5000 tail: 79 index: 428 + Stripe: offset: 3 data: 63765 rows: 5000 tail: 79 index: 428 Stream: column 0 section ROW_INDEX start: 3 length 15 Stream: column 1 section ROW_INDEX start: 18 length 158 Stream: column 2 section ROW_INDEX start: 176 length 171 Stream: column 3 section ROW_INDEX start: 347 length 84 Stream: column 1 section DATA start: 431 length 20029 Stream: column 2 section DATA start: 20460 length 40035 - Stream: column 3 section DATA start: 60495 length 3544 - Stream: column 3 section LENGTH start: 64039 length 25 - Stream: column 3 section DICTIONARY_DATA start: 64064 length 133 + Stream: column 3 section DATA start: 60495 length 3543 + Stream: column 3 section LENGTH start: 64038 length 25 + Stream: column 3 section DICTIONARY_DATA start: 64063 length 133 Encoding column 0: DIRECT Encoding column 1: DIRECT_V2 Encoding column 2: DIRECT_V2 @@ -68,18 +68,18 @@ Stripes: Entry 0: count: 1000 min: Darkness, max: worst positions: 0,0,0 Entry 1: count: 1000 min: Darkness, max: worst positions: 0,659,149 Entry 2: count: 1000 min: Darkness, max: worst positions: 0,1531,3 - Entry 3: count: 1000 min: Darkness, max: worst positions: 0,2282,32 - Entry 4: count: 1000 min: Darkness, max: worst positions: 0,3034,45 - Stripe: offset: 64276 data: 63755 rows: 5000 tail: 79 index: 421 - Stream: column 0 section ROW_INDEX start: 64276 length 15 - Stream: column 1 section ROW_INDEX start: 64291 length 157 - Stream: column 2 section ROW_INDEX start: 64448 length 169 - Stream: column 3 section ROW_INDEX start: 64617 length 80 - Stream: column 1 section DATA start: 64697 length 20029 - Stream: column 2 section DATA start: 84726 length 40035 - Stream: column 3 section DATA start: 124761 length 3533 - Stream: column 3 section LENGTH start: 128294 length 25 - Stream: column 3 section DICTIONARY_DATA start: 128319 length 133 + Entry 3: count: 1000 min: Darkness, max: worst positions: 0,2281,32 + Entry 4: count: 1000 min: Darkness, max: worst positions: 0,3033,45 + Stripe: offset: 64275 data: 63754 rows: 5000 tail: 79 index: 421 + Stream: column 0 section ROW_INDEX start: 64275 length 15 + Stream: column 1 section ROW_INDEX start: 64290 length 157 + Stream: column 2 section ROW_INDEX start: 64447 length 169 + Stream: column 3 section ROW_INDEX start: 64616 length 80 + Stream: column 1 section DATA start: 64696 
length 20029 + Stream: column 2 section DATA start: 84725 length 40035 + Stream: column 3 section DATA start: 124760 length 3532 + Stream: column 3 section LENGTH start: 128292 length 25 + Stream: column 3 section DICTIONARY_DATA start: 128317 length 133 Encoding column 0: DIRECT Encoding column 1: DIRECT_V2 Encoding column 2: DIRECT_V2 @@ -101,17 +101,17 @@ Stripes: Entry 1: count: 1000 min: Darkness, max: worst positions: 0,761,12 Entry 2: count: 1000 min: Darkness, max: worst positions: 0,1472,70 Entry 3: count: 1000 min: Darkness, max: worst positions: 0,2250,43 - Entry 4: count: 1000 min: Darkness, max: worst positions: 0,2979,88 - Stripe: offset: 128531 data: 63766 rows: 5000 tail: 79 index: 422 - Stream: column 0 section ROW_INDEX start: 128531 length 15 - Stream: column 1 section ROW_INDEX start: 128546 length 153 - Stream: column 2 section ROW_INDEX start: 128699 length 169 - Stream: column 3 section ROW_INDEX start: 128868 length 85 - Stream: column 1 section DATA start: 128953 length 20029 - Stream: column 2 section DATA start: 148982 length 40035 - Stream: column 3 section DATA start: 189017 length 3544 - Stream: column 3 section LENGTH start: 192561 length 25 - Stream: column 3 section DICTIONARY_DATA start: 192586 length 133 + Entry 4: count: 1000 min: Darkness, max: worst positions: 0,2978,88 + Stripe: offset: 128529 data: 63766 rows: 5000 tail: 79 index: 422 + Stream: column 0 section ROW_INDEX start: 128529 length 15 + Stream: column 1 section ROW_INDEX start: 128544 length 153 + Stream: column 2 section ROW_INDEX start: 128697 length 169 + Stream: column 3 section ROW_INDEX start: 128866 length 85 + Stream: column 1 section DATA start: 128951 length 20029 + Stream: column 2 section DATA start: 148980 length 40035 + Stream: column 3 section DATA start: 189015 length 3544 + Stream: column 3 section LENGTH start: 192559 length 25 + Stream: column 3 section DICTIONARY_DATA start: 192584 length 133 Encoding column 0: DIRECT Encoding column 1: DIRECT_V2 Encoding column 2: DIRECT_V2 @@ -134,16 +134,16 @@ Stripes: Entry 2: count: 1000 min: Darkness, max: worst positions: 0,1469,69 Entry 3: count: 1000 min: Darkness, max: worst positions: 0,2133,194 Entry 4: count: 1000 min: Darkness, max: worst positions: 0,3005,43 - Stripe: offset: 192798 data: 63796 rows: 5000 tail: 79 index: 425 - Stream: column 0 section ROW_INDEX start: 192798 length 15 - Stream: column 1 section ROW_INDEX start: 192813 length 156 - Stream: column 2 section ROW_INDEX start: 192969 length 168 - Stream: column 3 section ROW_INDEX start: 193137 length 86 - Stream: column 1 section DATA start: 193223 length 20029 - Stream: column 2 section DATA start: 213252 length 40035 - Stream: column 3 section DATA start: 253287 length 3574 - Stream: column 3 section LENGTH start: 256861 length 25 - Stream: column 3 section DICTIONARY_DATA start: 256886 length 133 + Stripe: offset: 192796 data: 63796 rows: 5000 tail: 79 index: 425 + Stream: column 0 section ROW_INDEX start: 192796 length 15 + Stream: column 1 section ROW_INDEX start: 192811 length 156 + Stream: column 2 section ROW_INDEX start: 192967 length 168 + Stream: column 3 section ROW_INDEX start: 193135 length 86 + Stream: column 1 section DATA start: 193221 length 20029 + Stream: column 2 section DATA start: 213250 length 40035 + Stream: column 3 section DATA start: 253285 length 3574 + Stream: column 3 section LENGTH start: 256859 length 25 + Stream: column 3 section DICTIONARY_DATA start: 256884 length 133 Encoding column 0: DIRECT Encoding column 1: DIRECT_V2 
Encoding column 2: DIRECT_V2 @@ -166,16 +166,16 @@ Stripes: Entry 2: count: 1000 min: Darkness, max: worst positions: 0,1485,52 Entry 3: count: 1000 min: Darkness, max: worst positions: 0,2196,104 Entry 4: count: 1000 min: Darkness, max: worst positions: 0,2934,131 - Stripe: offset: 257098 data: 12940 rows: 1000 tail: 71 index: 123 - Stream: column 0 section ROW_INDEX start: 257098 length 10 - Stream: column 1 section ROW_INDEX start: 257108 length 36 - Stream: column 2 section ROW_INDEX start: 257144 length 39 - Stream: column 3 section ROW_INDEX start: 257183 length 38 - Stream: column 1 section DATA start: 257221 length 4007 - Stream: column 2 section DATA start: 261228 length 8007 - Stream: column 3 section DATA start: 269235 length 768 - Stream: column 3 section LENGTH start: 270003 length 25 - Stream: column 3 section DICTIONARY_DATA start: 270028 length 133 + Stripe: offset: 257096 data: 12940 rows: 1000 tail: 71 index: 123 + Stream: column 0 section ROW_INDEX start: 257096 length 10 + Stream: column 1 section ROW_INDEX start: 257106 length 36 + Stream: column 2 section ROW_INDEX start: 257142 length 39 + Stream: column 3 section ROW_INDEX start: 257181 length 38 + Stream: column 1 section DATA start: 257219 length 4007 + Stream: column 2 section DATA start: 261226 length 8007 + Stream: column 3 section DATA start: 269233 length 768 + Stream: column 3 section LENGTH start: 270001 length 25 + Stream: column 3 section DICTIONARY_DATA start: 270026 length 133 Encoding column 0: DIRECT Encoding column 1: DIRECT_V2 Encoding column 2: DIRECT_V2 @@ -187,6 +187,6 @@ Stripes: Row group index column 3: Entry 0: count: 1000 min: Darkness, max: worst positions: 0,0,0 -File length: 270759 bytes +File length: 270760 bytes Padding length: 0 bytes Padding ratio: 0% diff --git a/ql/src/test/results/clientnegative/acid_overwrite.q.out b/ql/src/test/results/clientnegative/acid_overwrite.q.out index f28e0a4..0940106 100644 --- a/ql/src/test/results/clientnegative/acid_overwrite.q.out +++ b/ql/src/test/results/clientnegative/acid_overwrite.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_uanp -POSTHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_uanp diff --git a/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out b/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out new file mode 100644 index 0000000..d2c252f --- /dev/null +++ b/ql/src/test/results/clientnegative/alter_partition_change_col_dup_col.q.out @@ -0,0 +1,21 @@ +PREHOOK: query: create table alter_partition_change_col_dup_col (c1 string, c2 decimal(10,0)) partitioned by (p1 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter_partition_change_col_dup_col +POSTHOOK: query: create table alter_partition_change_col_dup_col (c1 string, c2 decimal(10,0)) partitioned by (p1 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default 
+POSTHOOK: Output: default@alter_partition_change_col_dup_col +PREHOOK: query: alter table alter_partition_change_col_dup_col add partition (p1='abc') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@alter_partition_change_col_dup_col +POSTHOOK: query: alter table alter_partition_change_col_dup_col add partition (p1='abc') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@alter_partition_change_col_dup_col +POSTHOOK: Output: default@alter_partition_change_col_dup_col@p1=abc +PREHOOK: query: -- should fail because of duplicate name c1 +alter table alter_partition_change_col_dup_col change c2 c1 decimal(14,4) +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@alter_partition_change_col_dup_col +PREHOOK: Output: default@alter_partition_change_col_dup_col +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Duplicate column name: c1 diff --git a/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out b/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out new file mode 100644 index 0000000..8e4422e --- /dev/null +++ b/ql/src/test/results/clientnegative/alter_partition_change_col_nonexist.q.out @@ -0,0 +1,21 @@ +PREHOOK: query: create table alter_partition_change_col_nonexist (c1 string, c2 decimal(10,0)) partitioned by (p1 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter_partition_change_col_nonexist +POSTHOOK: query: create table alter_partition_change_col_nonexist (c1 string, c2 decimal(10,0)) partitioned by (p1 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alter_partition_change_col_nonexist +PREHOOK: query: alter table alter_partition_change_col_nonexist add partition (p1='abc') +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@alter_partition_change_col_nonexist +POSTHOOK: query: alter table alter_partition_change_col_nonexist add partition (p1='abc') +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@alter_partition_change_col_nonexist +POSTHOOK: Output: default@alter_partition_change_col_nonexist@p1=abc +PREHOOK: query: -- should fail because of nonexistent column c3 +alter table alter_partition_change_col_nonexist change c3 c4 decimal(14,4) +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@alter_partition_change_col_nonexist +PREHOOK: Output: default@alter_partition_change_col_nonexist +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Invalid column reference c3 diff --git a/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out b/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out index 33eab12..197cd1d 100644 --- a/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out +++ b/ql/src/test/results/clientnegative/alter_partition_with_whitelist.q.out @@ -27,4 +27,4 @@ PREHOOK: query: ALTER TABLE part_whitelist_test PARTITION (ds='1') rename to par PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@part_whitelist_test PREHOOK: Output: default@part_whitelist_test@ds=1 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition. Partition value '1,2,3' contains a character not matched by whitelist pattern '[\\x20-\\x7E&&[^,]]*'. 
(configure with hive.metastore.partition.name.whitelist.pattern) diff --git a/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out b/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out index 635df4e..acb66ac 100644 --- a/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out +++ b/ql/src/test/results/clientnegative/alter_rename_partition_failure2.q.out @@ -35,4 +35,4 @@ PREHOOK: query: alter table alter_rename_partition partition (pCol1='old_part1:' PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@alter_rename_partition PREHOOK: Output: default@alter_rename_partition@pcol1=old_part1%3A/pcol2=old_part2%3A -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to rename partition. Partition already exists:default.alter_rename_partition.[old_part1:, old_part2:] diff --git a/ql/src/test/results/clientnegative/authorization_delete_nodeletepriv.q.out b/ql/src/test/results/clientnegative/authorization_delete_nodeletepriv.q.out index 00fb3f7..2fc358a 100644 --- a/ql/src/test/results/clientnegative/authorization_delete_nodeletepriv.q.out +++ b/ql/src/test/results/clientnegative/authorization_delete_nodeletepriv.q.out @@ -1,10 +1,10 @@ PREHOOK: query: -- check update without update priv -create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc +create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@auth_nodel POSTHOOK: query: -- check update without update priv -create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc +create table auth_nodel(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@auth_nodel diff --git a/ql/src/test/results/clientnegative/authorization_not_owner_drop_tab2.q.out b/ql/src/test/results/clientnegative/authorization_not_owner_drop_tab2.q.out new file mode 100644 index 0000000..0308e09 --- /dev/null +++ b/ql/src/test/results/clientnegative/authorization_not_owner_drop_tab2.q.out @@ -0,0 +1,29 @@ +PREHOOK: query: create database db1 +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:db1 +POSTHOOK: query: create database db1 +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:db1 +PREHOOK: query: use db1 +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:db1 +POSTHOOK: query: use db1 +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:db1 +PREHOOK: query: -- check if create table fails as different user. use db.table syntax +create table t1(i int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@t1 +POSTHOOK: query: -- check if create table fails as different user.
use db.table syntax +create table t1(i int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:db1 +POSTHOOK: Output: db1@t1 +PREHOOK: query: use default +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:default +POSTHOOK: query: use default +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:default +FAILED: HiveAccessControlException Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation DROPTABLE [[OBJECT OWNERSHIP] on Object [type=TABLE_OR_VIEW, name=db1.t1]] diff --git a/ql/src/test/results/clientnegative/authorization_sba_drop_table.q.out b/ql/src/test/results/clientnegative/authorization_sba_drop_table.q.out new file mode 100644 index 0000000..39cf42e --- /dev/null +++ b/ql/src/test/results/clientnegative/authorization_sba_drop_table.q.out @@ -0,0 +1,16 @@ +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: -- Attempt to drop table without having write permissions on table dir should result in error +drop table t1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/authorization_show_columns.q.out b/ql/src/test/results/clientnegative/authorization_show_columns.q.out new file mode 100644 index 0000000..7dca55e --- /dev/null +++ b/ql/src/test/results/clientnegative/authorization_show_columns.q.out @@ -0,0 +1,23 @@ +PREHOOK: query: create database db1 +PREHOOK: type: CREATEDATABASE +PREHOOK: Output: database:db1 +POSTHOOK: query: create database db1 +POSTHOOK: type: CREATEDATABASE +POSTHOOK: Output: database:db1 +PREHOOK: query: use db1 +PREHOOK: type: SWITCHDATABASE +PREHOOK: Input: database:db1 +POSTHOOK: query: use db1 +POSTHOOK: type: SWITCHDATABASE +POSTHOOK: Input: database:db1 +PREHOOK: query: -- check query without select privilege fails +create table t1(i int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:db1 +PREHOOK: Output: db1@t1 +POSTHOOK: query: -- check query without select privilege fails +create table t1(i int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:db1 +POSTHOOK: Output: db1@t1 +FAILED: HiveAccessControlException Permission denied: Principal [name=user1, type=USER] does not have following privileges for operation SHOWCOLUMNS [[SELECT] on Object [type=TABLE_OR_VIEW, name=db1.t1]] diff --git a/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out b/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out index e4b3c5e..c39c6d7 100644 --- a/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out +++ b/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out @@ -1,10 +1,10 @@ PREHOOK: query: -- check update without update priv -create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc +create table auth_noupd(i int) clustered by (i) into 2
buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@auth_noupd diff --git a/ql/src/test/results/clientnegative/delete_non_acid_table.q.out b/ql/src/test/results/clientnegative/delete_non_acid_table.q.out new file mode 100644 index 0000000..a9b884a --- /dev/null +++ b/ql/src/test/results/clientnegative/delete_non_acid_table.q.out @@ -0,0 +1,37 @@ +PREHOOK: query: create table not_an_acid_table2(a int, b varchar(128)) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@not_an_acid_table2 +POSTHOOK: query: create table not_an_acid_table2(a int, b varchar(128)) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@not_an_acid_table2 +PREHOOK: query: insert into table not_an_acid_table2 select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@not_an_acid_table2 +POSTHOOK: query: insert into table not_an_acid_table2 select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@not_an_acid_table2 +POSTHOOK: Lineage: not_an_acid_table2.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: not_an_acid_table2.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +PREHOOK: query: select a,b from not_an_acid_table2 order by a +PREHOOK: type: QUERY +PREHOOK: Input: default@not_an_acid_table2 +#### A masked pattern was here #### +POSTHOOK: query: select a,b from not_an_acid_table2 order by a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@not_an_acid_table2 +#### A masked pattern was here #### +-1073279343 oj1YrV5Wa +-1073051226 A34p7oRr2WvUJNf +-1072910839 0iqrc5 +-1072081801 dPkN74F7 +-1072076362 2uLyD28144vklju213J1mr +-1071480828 aw724t8c5558x2xneC624 +-1071363017 Anj0oF +-1070883071 0ruyd6Y50JpdGRf6HqD +-1070551679 iUR3Q +-1069736047 k17Am8uPHWk02cEf1jet +FAILED: SemanticException [Error 10297]: Attempt to do update or delete on table default.not_an_acid_table2 that does not use an AcidOutputFormat or is not bucketed diff --git a/ql/src/test/results/clientnegative/delete_not_bucketed.q.out b/ql/src/test/results/clientnegative/delete_not_bucketed.q.out new file mode 100644 index 0000000..d0ba680 --- /dev/null +++ b/ql/src/test/results/clientnegative/delete_not_bucketed.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid_notbucketed +POSTHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid_notbucketed +FAILED: SemanticException [Error 10297]: Attempt to do update or delete on table acid_notbucketed that does not use an AcidOutputFormat or is not bucketed diff --git a/ql/src/test/results/clientnegative/delete_sorted.q.out b/ql/src/test/results/clientnegative/delete_sorted.q.out new file mode 100644 index 0000000..0d248d0 --- /dev/null +++ b/ql/src/test/results/clientnegative/delete_sorted.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: create 
table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid_insertsort +POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) partitioned by (ds string) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid_insertsort +FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort diff --git a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out index 8fee4e1..96600eb 100644 --- a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out +++ b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out @@ -107,4 +107,5 @@ ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@test_table123 PREHOOK: Output: default@test_table123 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. The following columns have types incompatible with the existing columns in their respective positions : +b diff --git a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out index c32efa6..6291feb 100644 --- a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out +++ b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on2.q.out @@ -40,4 +40,5 @@ PREHOOK: query: ALTER TABLE test_table123 CHANGE COLUMN b b MAP PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@test_table123 PREHOOK: Output: default@test_table123 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. 
The following columns have types incompatible with the existing columns in their respective positions : +b diff --git a/ql/src/test/results/clientnegative/insert_sorted.q.out b/ql/src/test/results/clientnegative/insert_sorted.q.out new file mode 100644 index 0000000..50dd5eb --- /dev/null +++ b/ql/src/test/results/clientnegative/insert_sorted.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid_insertsort +POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid_insertsort +FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort diff --git a/ql/src/test/results/clientnegative/insert_values_sorted.q.out b/ql/src/test/results/clientnegative/insert_values_sorted.q.out new file mode 100644 index 0000000..50dd5eb --- /dev/null +++ b/ql/src/test/results/clientnegative/insert_values_sorted.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid_insertsort +POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid_insertsort +FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort diff --git a/ql/src/test/results/clientnegative/limit_partition_stats.q.out b/ql/src/test/results/clientnegative/limit_partition_stats.q.out index 5a5fe1f..b412358 100644 --- a/ql/src/test/results/clientnegative/limit_partition_stats.q.out +++ b/ql/src/test/results/clientnegative/limit_partition_stats.q.out @@ -43,9 +43,11 @@ STAGE PLANS: PREHOOK: query: select count(*) from part PREHOOK: type: QUERY +PREHOOK: Input: default@part #### A masked pattern was here #### POSTHOOK: query: select count(*) from part POSTHOOK: type: QUERY +POSTHOOK: Input: default@part #### A masked pattern was here #### 2000 PREHOOK: query: explain select count(*) from part @@ -62,31 +64,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: part - Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 4000 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE 
value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientnegative/temp_table_rename.q.out b/ql/src/test/results/clientnegative/temp_table_rename.q.out index 7622a25..0956aba 100644 --- a/ql/src/test/results/clientnegative/temp_table_rename.q.out +++ b/ql/src/test/results/clientnegative/temp_table_rename.q.out @@ -18,4 +18,4 @@ PREHOOK: query: alter table tmp2 rename to tmp1 PREHOOK: type: ALTERTABLE_RENAME PREHOOK: Input: default@tmp2 PREHOOK: Output: default@tmp2 -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Unable to alter table. Cannot rename temporary table to tmp1 - temporary table already exists with the same name diff --git a/ql/src/test/results/clientnegative/udf_local_resource.q.out b/ql/src/test/results/clientnegative/udf_local_resource.q.out index 2ebcb3b..2f2227b 100644 --- a/ql/src/test/results/clientnegative/udf_local_resource.q.out +++ b/ql/src/test/results/clientnegative/udf_local_resource.q.out @@ -1,5 +1,6 @@ PREHOOK: query: create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file '../../data/files/sales.txt' PREHOOK: type: CREATEFUNCTION +#### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default.lookup FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask. Hive warehouse is non-local, but ../../data/files/sales.txt specifies file on local filesystem. Resources on non-local warehouse should specify a non-local scheme/path diff --git a/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out b/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out index 60935bf..e184787 100644 --- a/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out +++ b/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out @@ -2,5 +2,6 @@ PREHOOK: query: create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFile PREHOOK: type: CREATEFUNCTION PREHOOK: Output: database:default PREHOOK: Output: default.lookup +PREHOOK: Output: nonexistent_file.txt nonexistent_file.txt does not exist FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask. 
nonexistent_file.txt does not exist diff --git a/ql/src/test/results/clientnegative/update_non_acid_table.q.out b/ql/src/test/results/clientnegative/update_non_acid_table.q.out new file mode 100644 index 0000000..381b0db --- /dev/null +++ b/ql/src/test/results/clientnegative/update_non_acid_table.q.out @@ -0,0 +1,37 @@ +PREHOOK: query: create table not_an_acid_table(a int, b varchar(128)) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@not_an_acid_table +POSTHOOK: query: create table not_an_acid_table(a int, b varchar(128)) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@not_an_acid_table +PREHOOK: query: insert into table not_an_acid_table select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@not_an_acid_table +POSTHOOK: query: insert into table not_an_acid_table select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@not_an_acid_table +POSTHOOK: Lineage: not_an_acid_table.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: not_an_acid_table.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +PREHOOK: query: select a,b from not_an_acid_table order by a +PREHOOK: type: QUERY +PREHOOK: Input: default@not_an_acid_table +#### A masked pattern was here #### +POSTHOOK: query: select a,b from not_an_acid_table order by a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@not_an_acid_table +#### A masked pattern was here #### +-1073279343 oj1YrV5Wa +-1073051226 A34p7oRr2WvUJNf +-1072910839 0iqrc5 +-1072081801 dPkN74F7 +-1072076362 2uLyD28144vklju213J1mr +-1071480828 aw724t8c5558x2xneC624 +-1071363017 Anj0oF +-1070883071 0ruyd6Y50JpdGRf6HqD +-1070551679 iUR3Q +-1069736047 k17Am8uPHWk02cEf1jet +FAILED: SemanticException [Error 10297]: Attempt to do update or delete on table default.not_an_acid_table that does not use an AcidOutputFormat or is not bucketed diff --git a/ql/src/test/results/clientnegative/update_not_bucketed.q.out b/ql/src/test/results/clientnegative/update_not_bucketed.q.out new file mode 100644 index 0000000..8ebf41d --- /dev/null +++ b/ql/src/test/results/clientnegative/update_not_bucketed.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) partitioned by (ds string) stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid_notbucketed +POSTHOOK: query: create table acid_notbucketed(a int, b varchar(128)) partitioned by (ds string) stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid_notbucketed +FAILED: SemanticException [Error 10297]: Attempt to do update or delete on table acid_notbucketed that does not use an AcidOutputFormat or is not bucketed diff --git a/ql/src/test/results/clientnegative/update_partition_col.q.out b/ql/src/test/results/clientnegative/update_partition_col.q.out index 003b53f..81c5014 100644 --- a/ql/src/test/results/clientnegative/update_partition_col.q.out +++ b/ql/src/test/results/clientnegative/update_partition_col.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table foo(a int, b 
varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@foo -POSTHOOK: query: create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table foo(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@foo diff --git a/ql/src/test/results/clientnegative/update_sorted.q.out b/ql/src/test/results/clientnegative/update_sorted.q.out new file mode 100644 index 0000000..50dd5eb --- /dev/null +++ b/ql/src/test/results/clientnegative/update_sorted.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acid_insertsort +POSTHOOK: query: create table acid_insertsort(a int, b varchar(128)) clustered by (a) sorted by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acid_insertsort +FAILED: SemanticException [Error 10298]: ACID insert, update, delete not supported on tables that are sorted, table acid_insertsort diff --git a/ql/src/test/results/clientpositive/acid_vectorization.q.out b/ql/src/test/results/clientpositive/acid_vectorization.q.out index 4a9d19f..18dada5 100644 --- a/ql/src/test/results/clientpositive/acid_vectorization.q.out +++ b/ql/src/test/results/clientpositive/acid_vectorization.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC +PREHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_vectorized -POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC +POSTHOOK: query: CREATE TABLE acid_vectorized(a INT, b STRING) CLUSTERED BY(a) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_vectorized diff --git a/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out index eae45b2..f8486ad 100644 --- a/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out +++ b/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out @@ -65,9 +65,11 @@ value string #### A masked pattern was here #### PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@src_orc_merge_test_stat PREHOOK: Output: default@src_orc_merge_test_stat POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_orc_merge_test_stat POSTHOOK: Output: default@src_orc_merge_test_stat PREHOOK: query: desc formatted src_orc_merge_test_stat 
PREHOOK: type: DESCTABLE @@ -115,9 +117,11 @@ POSTHOOK: Input: default@src_orc_merge_test_stat POSTHOOK: Output: default@src_orc_merge_test_stat PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@src_orc_merge_test_stat PREHOOK: Output: default@src_orc_merge_test_stat POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_orc_merge_test_stat POSTHOOK: Output: default@src_orc_merge_test_stat PREHOOK: query: desc formatted src_orc_merge_test_stat PREHOOK: type: DESCTABLE @@ -243,8 +247,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 3 - numRows 500 - rawDataSize 47000 + numRows 1500 + rawDataSize 141000 totalSize 7488 #### A masked pattern was here #### @@ -260,10 +264,12 @@ Storage Desc Params: serialization.format 1 PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@src_orc_merge_test_part_stat PREHOOK: Output: default@src_orc_merge_test_part_stat PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011 POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_orc_merge_test_part_stat POSTHOOK: Output: default@src_orc_merge_test_part_stat POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011 PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011') @@ -317,10 +323,12 @@ POSTHOOK: Input: default@src_orc_merge_test_part_stat POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011 PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@src_orc_merge_test_part_stat PREHOOK: Output: default@src_orc_merge_test_part_stat PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011 POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_orc_merge_test_part_stat POSTHOOK: Output: default@src_orc_merge_test_part_stat POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011 PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011') diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out index 5047b23..de3cae6 100644 --- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out +++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out @@ -358,11 +358,11 @@ Table: tst1 Protect Mode: None #### A masked pattern was here #### Partition Parameters: - COLUMN_STATS_ACCURATE false + COLUMN_STATS_ACCURATE true #### A masked pattern was here #### numFiles 12 - numRows -1 - rawDataSize -1 + numRows 500 + rawDataSize 5312 totalSize 5812 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/alter_partition_change_col.q.out b/ql/src/test/results/clientpositive/alter_partition_change_col.q.out new file mode 100644 index 0000000..7123e40 --- /dev/null +++ b/ql/src/test/results/clientpositive/alter_partition_change_col.q.out @@ -0,0 +1,578 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +create table alter_partition_change_col0 (c1 string, c2 string) +PREHOOK: type: 
CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter_partition_change_col0 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +create table alter_partition_change_col0 (c1 string, c2 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alter_partition_change_col0 +PREHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_partition_change_col0 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@alter_partition_change_col0 +POSTHOOK: query: load data local inpath '../../data/files/dec.txt' overwrite into table alter_partition_change_col0 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@alter_partition_change_col0 +PREHOOK: query: create table alter_partition_change_col1 (c1 string, c2 string) partitioned by (p1 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alter_partition_change_col1 +POSTHOOK: query: create table alter_partition_change_col1 (c1 string, c2 string) partitioned by (p1 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alter_partition_change_col1 +PREHOOK: query: insert overwrite table alter_partition_change_col1 partition (p1) + select c1, c2, 'abc' from alter_partition_change_col0 + union all + select c1, c2, null from alter_partition_change_col0 +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_partition_change_col0 +PREHOOK: Output: default@alter_partition_change_col1 +POSTHOOK: query: insert overwrite table alter_partition_change_col1 partition (p1) + select c1, c2, 'abc' from alter_partition_change_col0 + union all + select c1, c2, null from alter_partition_change_col0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_partition_change_col0 +POSTHOOK: Output: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Output: default@alter_partition_change_col1@p1=abc +POSTHOOK: Lineage: alter_partition_change_col1 PARTITION(p1=__HIVE_DEFAULT_PARTITION__).c1 EXPRESSION [(alter_partition_change_col0)alter_partition_change_col0.FieldSchema(name:c1, type:string, comment:null), (alter_partition_change_col0)alter_partition_change_col0.FieldSchema(name:c1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_partition_change_col1 PARTITION(p1=__HIVE_DEFAULT_PARTITION__).c2 EXPRESSION [(alter_partition_change_col0)alter_partition_change_col0.FieldSchema(name:c2, type:string, comment:null), (alter_partition_change_col0)alter_partition_change_col0.FieldSchema(name:c2, type:string, comment:null), ] +POSTHOOK: Lineage: alter_partition_change_col1 PARTITION(p1=abc).c1 EXPRESSION [(alter_partition_change_col0)alter_partition_change_col0.FieldSchema(name:c1, type:string, comment:null), (alter_partition_change_col0)alter_partition_change_col0.FieldSchema(name:c1, type:string, comment:null), ] +POSTHOOK: Lineage: alter_partition_change_col1 PARTITION(p1=abc).c2 EXPRESSION [(alter_partition_change_col0)alter_partition_change_col0.FieldSchema(name:c2, type:string, comment:null), (alter_partition_change_col0)alter_partition_change_col0.FieldSchema(name:c2, type:string, comment:null), ] +PREHOOK: query: show partitions alter_partition_change_col1 +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: show partitions alter_partition_change_col1 +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@alter_partition_change_col1 
+p1=__HIVE_DEFAULT_PARTITION__ +p1=abc +PREHOOK: query: select * from alter_partition_change_col1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_partition_change_col1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +Beck 0.0 __HIVE_DEFAULT_PARTITION__ +Beck 0.0 abc +Beck 77.341 __HIVE_DEFAULT_PARTITION__ +Beck 77.341 abc +Beck 79.9 __HIVE_DEFAULT_PARTITION__ +Beck 79.9 abc +Cluck 5.96 __HIVE_DEFAULT_PARTITION__ +Cluck 5.96 abc +Mary 33.33 __HIVE_DEFAULT_PARTITION__ +Mary 33.33 abc +Mary 4.329 __HIVE_DEFAULT_PARTITION__ +Mary 4.329 abc +Snow 55.71 __HIVE_DEFAULT_PARTITION__ +Snow 55.71 abc +Tom -12.25 __HIVE_DEFAULT_PARTITION__ +Tom -12.25 abc +Tom 19.00 __HIVE_DEFAULT_PARTITION__ +Tom 19.00 abc +Tom 234.79 __HIVE_DEFAULT_PARTITION__ +Tom 234.79 abc +PREHOOK: query: -- Change c2 to decimal(10,0) +alter table alter_partition_change_col1 change c2 c2 decimal(10,0) +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Output: default@alter_partition_change_col1 +POSTHOOK: query: -- Change c2 to decimal(10,0) +alter table alter_partition_change_col1 change c2 c2 decimal(10,0) +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Output: default@alter_partition_change_col1 +PREHOOK: query: alter table alter_partition_change_col1 partition (p1='abc') change c2 c2 decimal(10,0) +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Output: default@alter_partition_change_col1@p1=abc +POSTHOOK: query: alter table alter_partition_change_col1 partition (p1='abc') change c2 c2 decimal(10,0) +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +POSTHOOK: Output: default@alter_partition_change_col1@p1=abc +PREHOOK: query: alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__') change c2 c2 decimal(10,0) +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Output: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: query: alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__') change c2 c2 decimal(10,0) +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Output: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: query: select * from alter_partition_change_col1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_partition_change_col1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: 
Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +Beck 0 __HIVE_DEFAULT_PARTITION__ +Beck 0 abc +Beck 77 __HIVE_DEFAULT_PARTITION__ +Beck 77 abc +Beck 80 __HIVE_DEFAULT_PARTITION__ +Beck 80 abc +Cluck 6 __HIVE_DEFAULT_PARTITION__ +Cluck 6 abc +Mary 33 __HIVE_DEFAULT_PARTITION__ +Mary 33 abc +Mary 4 __HIVE_DEFAULT_PARTITION__ +Mary 4 abc +Snow 56 __HIVE_DEFAULT_PARTITION__ +Snow 56 abc +Tom -12 __HIVE_DEFAULT_PARTITION__ +Tom -12 abc +Tom 19 __HIVE_DEFAULT_PARTITION__ +Tom 19 abc +Tom 235 __HIVE_DEFAULT_PARTITION__ +Tom 235 abc +PREHOOK: query: -- Change the column type at the table level. Table-level describe shows the new type, but the existing partition does not. +alter table alter_partition_change_col1 change c2 c2 decimal(14,4) +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Output: default@alter_partition_change_col1 +POSTHOOK: query: -- Change the column type at the table level. Table-level describe shows the new type, but the existing partition does not. +alter table alter_partition_change_col1 change c2 c2 decimal(14,4) +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Output: default@alter_partition_change_col1 +PREHOOK: query: describe alter_partition_change_col1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: describe alter_partition_change_col1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_partition_change_col1 +c1 string +c2 decimal(14,4) +p1 string + +# Partition Information +# col_name data_type comment + +p1 string +PREHOOK: query: describe alter_partition_change_col1 partition (p1='abc') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: describe alter_partition_change_col1 partition (p1='abc') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_partition_change_col1 +c1 string +c2 decimal(10,0) +p1 string + +# Partition Information +# col_name data_type comment + +p1 string +PREHOOK: query: select * from alter_partition_change_col1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_partition_change_col1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +Beck 0 __HIVE_DEFAULT_PARTITION__ +Beck 0 abc +Beck 77 __HIVE_DEFAULT_PARTITION__ +Beck 77 abc +Beck 80 __HIVE_DEFAULT_PARTITION__ +Beck 80 abc +Cluck 6 __HIVE_DEFAULT_PARTITION__ +Cluck 6 abc +Mary 33 __HIVE_DEFAULT_PARTITION__ +Mary 33 abc +Mary 4 __HIVE_DEFAULT_PARTITION__ +Mary 4 abc +Snow 56 __HIVE_DEFAULT_PARTITION__ +Snow 56 abc +Tom -12 __HIVE_DEFAULT_PARTITION__ +Tom -12 abc +Tom 19 __HIVE_DEFAULT_PARTITION__ +Tom 19 abc +Tom 235 __HIVE_DEFAULT_PARTITION__ +Tom 235 abc +PREHOOK: query: -- now change the column type of the existing partition +alter table alter_partition_change_col1 partition (p1='abc') change c2 c2 decimal(14,4) +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Output: default@alter_partition_change_col1@p1=abc +POSTHOOK: query: -- now change the column 
type of the existing partition +alter table alter_partition_change_col1 partition (p1='abc') change c2 c2 decimal(14,4) +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +POSTHOOK: Output: default@alter_partition_change_col1@p1=abc +PREHOOK: query: describe alter_partition_change_col1 partition (p1='abc') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: describe alter_partition_change_col1 partition (p1='abc') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_partition_change_col1 +c1 string +c2 decimal(14,4) +p1 string + +# Partition Information +# col_name data_type comment + +p1 string +PREHOOK: query: select * from alter_partition_change_col1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_partition_change_col1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +Beck 0 __HIVE_DEFAULT_PARTITION__ +Beck 0.0 abc +Beck 77 __HIVE_DEFAULT_PARTITION__ +Beck 77.341 abc +Beck 79.9 abc +Beck 80 __HIVE_DEFAULT_PARTITION__ +Cluck 5.96 abc +Cluck 6 __HIVE_DEFAULT_PARTITION__ +Mary 33 __HIVE_DEFAULT_PARTITION__ +Mary 33.33 abc +Mary 4 __HIVE_DEFAULT_PARTITION__ +Mary 4.329 abc +Snow 55.71 abc +Snow 56 __HIVE_DEFAULT_PARTITION__ +Tom -12 __HIVE_DEFAULT_PARTITION__ +Tom -12.25 abc +Tom 19 __HIVE_DEFAULT_PARTITION__ +Tom 19.00 abc +Tom 234.79 abc +Tom 235 __HIVE_DEFAULT_PARTITION__ +PREHOOK: query: -- change column for default partition value +alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__') change c2 c2 decimal(14,4) +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Output: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: query: -- change column for default partition value +alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__') change c2 c2 decimal(14,4) +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Output: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: query: describe alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: describe alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_partition_change_col1 +c1 string +c2 decimal(14,4) +p1 string + +# Partition Information +# col_name data_type comment + +p1 string +PREHOOK: query: select * from alter_partition_change_col1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_partition_change_col1 +POSTHOOK: type: QUERY 
+POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +Beck 0.0 __HIVE_DEFAULT_PARTITION__ +Beck 0.0 abc +Beck 77.341 __HIVE_DEFAULT_PARTITION__ +Beck 77.341 abc +Beck 79.9 __HIVE_DEFAULT_PARTITION__ +Beck 79.9 abc +Cluck 5.96 __HIVE_DEFAULT_PARTITION__ +Cluck 5.96 abc +Mary 33.33 __HIVE_DEFAULT_PARTITION__ +Mary 33.33 abc +Mary 4.329 __HIVE_DEFAULT_PARTITION__ +Mary 4.329 abc +Snow 55.71 __HIVE_DEFAULT_PARTITION__ +Snow 55.71 abc +Tom -12.25 __HIVE_DEFAULT_PARTITION__ +Tom -12.25 abc +Tom 19.00 __HIVE_DEFAULT_PARTITION__ +Tom 19.00 abc +Tom 234.79 __HIVE_DEFAULT_PARTITION__ +Tom 234.79 abc +PREHOOK: query: -- Try out replace columns +alter table alter_partition_change_col1 partition (p1='abc') replace columns (c1 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Output: default@alter_partition_change_col1@p1=abc +POSTHOOK: query: -- Try out replace columns +alter table alter_partition_change_col1 partition (p1='abc') replace columns (c1 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +POSTHOOK: Output: default@alter_partition_change_col1@p1=abc +PREHOOK: query: describe alter_partition_change_col1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: describe alter_partition_change_col1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_partition_change_col1 +c1 string +c2 decimal(14,4) +p1 string + +# Partition Information +# col_name data_type comment + +p1 string +PREHOOK: query: describe alter_partition_change_col1 partition (p1='abc') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: describe alter_partition_change_col1 partition (p1='abc') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_partition_change_col1 +c1 string +p1 string + +# Partition Information +# col_name data_type comment + +p1 string +PREHOOK: query: select * from alter_partition_change_col1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_partition_change_col1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +Beck 0.0 __HIVE_DEFAULT_PARTITION__ +Beck 77.341 __HIVE_DEFAULT_PARTITION__ +Beck 79.9 __HIVE_DEFAULT_PARTITION__ +Beck NULL abc +Beck NULL abc +Beck NULL abc +Cluck 5.96 __HIVE_DEFAULT_PARTITION__ +Cluck NULL abc +Mary 33.33 __HIVE_DEFAULT_PARTITION__ +Mary 4.329 __HIVE_DEFAULT_PARTITION__ +Mary NULL abc +Mary NULL abc +Snow 55.71 __HIVE_DEFAULT_PARTITION__ +Snow NULL abc +Tom -12.25 __HIVE_DEFAULT_PARTITION__ +Tom 19.00 __HIVE_DEFAULT_PARTITION__ +Tom 234.79 __HIVE_DEFAULT_PARTITION__ +Tom NULL abc +Tom NULL abc +Tom NULL abc +PREHOOK: query: alter table alter_partition_change_col1 replace columns (c1 string) +PREHOOK: type: ALTERTABLE_REPLACECOLS +PREHOOK: Input: default@alter_partition_change_col1 
+PREHOOK: Output: default@alter_partition_change_col1 +POSTHOOK: query: alter table alter_partition_change_col1 replace columns (c1 string) +POSTHOOK: type: ALTERTABLE_REPLACECOLS +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Output: default@alter_partition_change_col1 +PREHOOK: query: describe alter_partition_change_col1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: describe alter_partition_change_col1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_partition_change_col1 +c1 string +p1 string + +# Partition Information +# col_name data_type comment + +p1 string +PREHOOK: query: select * from alter_partition_change_col1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_partition_change_col1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +Beck __HIVE_DEFAULT_PARTITION__ +Beck __HIVE_DEFAULT_PARTITION__ +Beck __HIVE_DEFAULT_PARTITION__ +Beck abc +Beck abc +Beck abc +Cluck __HIVE_DEFAULT_PARTITION__ +Cluck abc +Mary __HIVE_DEFAULT_PARTITION__ +Mary __HIVE_DEFAULT_PARTITION__ +Mary abc +Mary abc +Snow __HIVE_DEFAULT_PARTITION__ +Snow abc +Tom __HIVE_DEFAULT_PARTITION__ +Tom __HIVE_DEFAULT_PARTITION__ +Tom __HIVE_DEFAULT_PARTITION__ +Tom abc +Tom abc +Tom abc +PREHOOK: query: -- Try add columns +alter table alter_partition_change_col1 add columns (c2 decimal(14,4)) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Output: default@alter_partition_change_col1 +POSTHOOK: query: -- Try add columns +alter table alter_partition_change_col1 add columns (c2 decimal(14,4)) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Output: default@alter_partition_change_col1 +PREHOOK: query: describe alter_partition_change_col1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: describe alter_partition_change_col1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_partition_change_col1 +c1 string +c2 decimal(14,4) +p1 string + +# Partition Information +# col_name data_type comment + +p1 string +PREHOOK: query: describe alter_partition_change_col1 partition (p1='abc') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: describe alter_partition_change_col1 partition (p1='abc') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_partition_change_col1 +c1 string +p1 string + +# Partition Information +# col_name data_type comment + +p1 string +PREHOOK: query: select * from alter_partition_change_col1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_partition_change_col1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Input: 
default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +Beck 0.0 __HIVE_DEFAULT_PARTITION__ +Beck 77.341 __HIVE_DEFAULT_PARTITION__ +Beck 79.9 __HIVE_DEFAULT_PARTITION__ +Beck NULL abc +Beck NULL abc +Beck NULL abc +Cluck 5.96 __HIVE_DEFAULT_PARTITION__ +Cluck NULL abc +Mary 33.33 __HIVE_DEFAULT_PARTITION__ +Mary 4.329 __HIVE_DEFAULT_PARTITION__ +Mary NULL abc +Mary NULL abc +Snow 55.71 __HIVE_DEFAULT_PARTITION__ +Snow NULL abc +Tom -12.25 __HIVE_DEFAULT_PARTITION__ +Tom 19.00 __HIVE_DEFAULT_PARTITION__ +Tom 234.79 __HIVE_DEFAULT_PARTITION__ +Tom NULL abc +Tom NULL abc +Tom NULL abc +PREHOOK: query: alter table alter_partition_change_col1 partition (p1='abc') add columns (c2 decimal(14,4)) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Output: default@alter_partition_change_col1@p1=abc +POSTHOOK: query: alter table alter_partition_change_col1 partition (p1='abc') add columns (c2 decimal(14,4)) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +POSTHOOK: Output: default@alter_partition_change_col1@p1=abc +PREHOOK: query: describe alter_partition_change_col1 partition (p1='abc') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: query: describe alter_partition_change_col1 partition (p1='abc') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@alter_partition_change_col1 +c1 string +c2 decimal(14,4) +p1 string + +# Partition Information +# col_name data_type comment + +p1 string +PREHOOK: query: select * from alter_partition_change_col1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alter_partition_change_col1 +PREHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +PREHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +POSTHOOK: query: select * from alter_partition_change_col1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alter_partition_change_col1 +POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Input: default@alter_partition_change_col1@p1=abc +#### A masked pattern was here #### +Beck 0.0 __HIVE_DEFAULT_PARTITION__ +Beck 0.0 abc +Beck 77.341 __HIVE_DEFAULT_PARTITION__ +Beck 77.341 abc +Beck 79.9 __HIVE_DEFAULT_PARTITION__ +Beck 79.9 abc +Cluck 5.96 __HIVE_DEFAULT_PARTITION__ +Cluck 5.96 abc +Mary 33.33 __HIVE_DEFAULT_PARTITION__ +Mary 33.33 abc +Mary 4.329 __HIVE_DEFAULT_PARTITION__ +Mary 4.329 abc +Snow 55.71 __HIVE_DEFAULT_PARTITION__ +Snow 55.71 abc +Tom -12.25 __HIVE_DEFAULT_PARTITION__ +Tom -12.25 abc +Tom 19.00 __HIVE_DEFAULT_PARTITION__ +Tom 19.00 abc +Tom 234.79 __HIVE_DEFAULT_PARTITION__ +Tom 234.79 abc diff --git a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out index 4f26fec..f71fa05 100644 --- a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out +++ b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out @@ -121,18 +121,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alter_coltype - Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE 
Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col0 (type: bigint) auto parallelism: false @@ -240,17 +240,17 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -350,18 +350,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alter_coltype - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col0 (type: bigint) auto parallelism: false @@ -422,17 +422,17 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -511,18 +511,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alter_coltype - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE GatherStats: 
false Select Operator - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col0 (type: bigint) auto parallelism: false @@ -583,17 +583,17 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -716,48 +716,14 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alter_coltype - Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: key (type: string), value (type: string), dt (type: string), ts (type: double) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string:string:string:double - escape.delim \ - hive.serialization.extend.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### + Stage: Stage-0 + Fetch Operator + limit: -1 + Partition Description: Partition - base file name: ts=3.0 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -801,9 +767,7 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.alter_coltype name: default.alter_coltype -#### A masked pattern was here #### Partition - base file name: ts=6.30 input format: 
org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -847,15 +811,16 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.alter_coltype name: default.alter_coltype - Truncated Path -> Alias: - /alter_coltype/dt=100/ts=3.0 [alter_coltype] - /alter_coltype/dt=100/ts=6.30 [alter_coltype] - - Stage: Stage-0 - Fetch Operator - limit: -1 Processor Tree: - ListSink + TableScan + alias: alter_coltype + Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: key (type: string), value (type: string), dt (type: string), ts (type: double) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 50 Data size: 382 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: select count(*) from alter_coltype where ts = 3.0 PREHOOK: type: QUERY @@ -1020,48 +985,14 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alterdynamic_part_table - Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: intcol (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0 - columns.types string - escape.delim \ - hive.serialization.extend.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### + Stage: Stage-0 + Fetch Operator + limit: -1 + Partition Description: Partition - base file name: partcol2=1 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -1076,10 +1007,10 @@ STAGE PLANS: #### A masked pattern was here #### name pt.alterdynamic_part_table numFiles 2 - numRows 1 + numRows 2 partition_columns partcol1/partcol2 partition_columns.types int:string - rawDataSize 2 + rawDataSize 3 serialization.ddl struct alterdynamic_part_table { string intcol} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1105,14 +1036,16 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: pt.alterdynamic_part_table name: pt.alterdynamic_part_table - Truncated Path -> Alias: - /pt.db/alterdynamic_part_table/partcol1=1/partcol2=1 [alterdynamic_part_table] - - Stage: Stage-0 - Fetch Operator - limit: -1 Processor Tree: - ListSink + TableScan + alias: alterdynamic_part_table + Statistics: Num rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: intcol (type: string) + outputColumnNames: _col0 + Statistics: Num 
rows: 2 Data size: 3 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: explain extended select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') PREHOOK: type: QUERY @@ -1157,48 +1090,14 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alterdynamic_part_table - Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: intcol (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0 - columns.types string - escape.delim \ - hive.serialization.extend.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### + Stage: Stage-0 + Fetch Operator + limit: -1 + Partition Description: Partition - base file name: partcol2=1 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -1242,14 +1141,16 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: pt.alterdynamic_part_table name: pt.alterdynamic_part_table - Truncated Path -> Alias: - /pt.db/alterdynamic_part_table/partcol1=2/partcol2=1 [alterdynamic_part_table] - - Stage: Stage-0 - Fetch Operator - limit: -1 Processor Tree: - ListSink + TableScan + alias: alterdynamic_part_table + Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: intcol (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: select intcol from pt.alterdynamic_part_table where (partcol1='2' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/alter_table_location.q.out b/ql/src/test/results/clientpositive/alter_table_location.q.out new file mode 100644 index 0000000..167a936 --- /dev/null +++ b/ql/src/test/results/clientpositive/alter_table_location.q.out @@ -0,0 +1,55 @@ +PREHOOK: query: drop table if exists hcat_altertable_16 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists hcat_altertable_16 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table hcat_altertable_16(a int, b string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@hcat_altertable_16 +POSTHOOK: query: create table hcat_altertable_16(a int, b string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@hcat_altertable_16 
+PREHOOK: query: show table extended like hcat_altertable_16 +PREHOOK: type: SHOW_TABLESTATUS +POSTHOOK: query: show table extended like hcat_altertable_16 +POSTHOOK: type: SHOW_TABLESTATUS +tableName:hcat_altertable_16 +#### A masked pattern was here #### +inputformat:org.apache.hadoop.mapred.TextInputFormat +outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +columns:struct columns { i32 a, string b} +partitioned:false +partitionColumns: +totalNumberFiles:0 +totalFileSize:0 +maxFileSize:0 +minFileSize:0 +#### A masked pattern was here #### + +#### A masked pattern was here #### +PREHOOK: type: ALTERTABLE_LOCATION +PREHOOK: Input: default@hcat_altertable_16 +PREHOOK: Output: default@hcat_altertable_16 +#### A masked pattern was here #### +POSTHOOK: type: ALTERTABLE_LOCATION +POSTHOOK: Input: default@hcat_altertable_16 +POSTHOOK: Output: default@hcat_altertable_16 +#### A masked pattern was here #### +PREHOOK: query: show table extended like hcat_altertable_16 +PREHOOK: type: SHOW_TABLESTATUS +POSTHOOK: query: show table extended like hcat_altertable_16 +POSTHOOK: type: SHOW_TABLESTATUS +tableName:hcat_altertable_16 +#### A masked pattern was here #### +inputformat:org.apache.hadoop.mapred.TextInputFormat +outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +columns:struct columns { i32 a, string b} +partitioned:false +partitionColumns: +totalNumberFiles:unknown +totalFileSize:unknown +maxFileSize:unknown +minFileSize:unknown +#### A masked pattern was here #### + diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out index 871c4217..1459b44 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out @@ -1,4 +1,24 @@ -PREHOOK: query: create table if not exists loc_staging ( +PREHOOK: query: -- hash aggregation is disabled + +-- There are different cases for Group By depending on map/reduce side, hash aggregation, +-- grouping sets and column stats. If we don't have column stats, we just assume hash +-- aggregation is disabled. 
Following are the possible cases and rules for cardinality +-- estimation + +-- MAP SIDE: +-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - numRows +-- Case 2: NO column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet +-- Case 3: column stats, hash aggregation, NO grouping sets - Min(numRows / 2, ndvProduct * parallelism) +-- Case 4: column stats, hash aggregation, grouping sets - Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) +-- Case 5: column stats, NO hash aggregation, NO grouping sets - numRows +-- Case 6: column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet + +-- REDUCE SIDE: +-- Case 7: NO column stats - numRows / 2 +-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet) +-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) + +create table if not exists loc_staging ( state string, locid int, zip bigint, @@ -7,7 +27,27 @@ PREHOOK: query: create table if not exists loc_staging ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@loc_staging -POSTHOOK: query: create table if not exists loc_staging ( +POSTHOOK: query: -- hash aggregation is disabled + +-- There are different cases for Group By depending on map/reduce side, hash aggregation, +-- grouping sets and column stats. If we don't have column stats, we just assume hash +-- aggregation is disabled. Following are the possible cases and rules for cardinality +-- estimation + +-- MAP SIDE: +-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - numRows +-- Case 2: NO column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet +-- Case 3: column stats, hash aggregation, NO grouping sets - Min(numRows / 2, ndvProduct * parallelism) +-- Case 4: column stats, hash aggregation, grouping sets - Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) +-- Case 5: column stats, NO hash aggregation, NO grouping sets - numRows +-- Case 6: column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet + +-- REDUCE SIDE: +-- Case 7: NO column stats - numRows / 2 +-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet) +-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) + +create table if not exists loc_staging ( state string, locid int, zip bigint, @@ -190,22 +230,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year +PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year PREHOOK: type: QUERY PREHOOK: Input: default@loc_orc #### A masked pattern was here #### -POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year +POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,year POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc #### A masked pattern was here #### -PREHOOK: query: -- only one distinct value in year column + 1 NULL value --- map-side GBY: numRows: 8 (map-side will not do any reduction) --- reduce-side GBY: numRows: 2 +PREHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 +-- Case 9: column stats, NO grouping sets - cardinality = 2 explain select year from loc_orc group by year PREHOOK: type: QUERY -POSTHOOK: query: -- only one distinct value in year column + 1 NULL value --- map-side GBY: numRows: 8 (map-side
will not do any reduction) --- reduce-side GBY: numRows: 2 +POSTHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 +-- Case 9: column stats, NO grouping sets - cardinality = 2 explain select year from loc_orc group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -257,12 +295,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY: numRows: 8 --- reduce-side GBY: numRows: 4 +PREHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 +-- Case 9: column stats, NO grouping sets - cardinality = 8 explain select state,locid from loc_orc group by state,locid PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY: numRows: 8 --- reduce-side GBY: numRows: 4 +POSTHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 +-- Case 9: column stats, NO grouping sets - cardinality = 8 explain select state,locid from loc_orc group by state,locid POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -295,14 +333,14 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 4 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 4 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -314,10 +352,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 8: column stats, grouping sets - cardinality = 32 explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 8: column stats, grouping sets - cardinality = 32 explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -339,25 +379,25 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) mode: mergepartial outputColumnNames:
_col0, _col1, _col2 - Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -369,10 +409,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 +-- Case 8: column stats, grouping sets - cardinality = 24 explain select state,locid from loc_orc group by state,locid with rollup PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 +-- Case 8: column stats, grouping sets - cardinality = 24 explain select state,locid from loc_orc group by state,locid with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -394,25 +436,25 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 12 Data size: 2100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 12 Data size: 1080 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 12 Data size: 1080 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -424,10 +466,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 8 reduce-side GBY numRows: 4 +PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8 +-- Case 8: column stats, grouping sets - cardinality = 8 explain select 
state,locid from loc_orc group by state,locid grouping sets((state)) PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 8 reduce-side GBY numRows: 4 +POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8 +-- Case 8: column stats, grouping sets - cardinality = 8 explain select state,locid from loc_orc group by state,locid grouping sets((state)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -449,25 +493,25 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 4 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -479,10 +523,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 16 reduce-side GBY numRows: 8 +PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16 +-- Case 8: column stats, grouping sets - cardinality = 16 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 16 reduce-side GBY numRows: 8 +POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16 +-- Case 8: column stats, grouping sets - cardinality = 16 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -504,25 +550,25 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 2800 
Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -534,10 +580,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 +-- Case 8: column stats, grouping sets - cardinality = 24 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 +-- Case 8: column stats, grouping sets - cardinality = 24 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -559,25 +607,25 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 12 Data size: 2100 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 12 Data size: 1080 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 12 Data size: 1080 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 2160 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -589,10 +637,12 @@ STAGE PLANS: Processor Tree: ListSink 
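[Editor's note: the nine "Case" comments quoted in this test compress into two small formulas. The sketch below is an illustrative restatement for readers checking the expected row counts against the plans, not Hive's actual implementation (the real logic lives in the stats annotation rules under ql); the class, method, and parameter names are invented for this example, and sizeOfGroupingSet is 1 for a plain GROUP BY.]

public final class GroupByCardinalitySketch {
  // Map-side estimate (Cases 1-6). Without column stats, hash aggregation
  // is assumed disabled, so the map side performs no reduction.
  static long mapSide(long numRows, long ndvProduct, long parallelism,
      long sizeOfGroupingSet, boolean colStats, boolean hashAgg) {
    if (!colStats || !hashAgg) {
      return numRows * sizeOfGroupingSet;                   // Cases 1, 2, 5, 6
    }
    return Math.min((numRows * sizeOfGroupingSet) / 2,      // Cases 3, 4
        ndvProduct * parallelism * sizeOfGroupingSet);
  }

  // Reduce-side estimate (Cases 7-9).
  static long reduceSide(long numRows, long ndvProduct,
      long sizeOfGroupingSet, boolean colStats) {
    return colStats
        ? Math.min(numRows, ndvProduct * sizeOfGroupingSet) // Cases 8, 9
        : numRows / 2;                                      // Case 7
  }

  public static void main(String[] args) {
    // "group by state,locid with cube" over 8 rows, column stats present,
    // hash aggregation disabled, ndvProduct = 6 * 7 (ndv of state, locid):
    // Case 6 map side = 8 * 4 = 32, Case 8 reduce side = min(32, 42 * 4) = 32.
    long map = mapSide(8, 6 * 7, 1, 4, true, false);
    System.out.println(map + " -> " + reduceSide(map, 6 * 7, 4, true));
  }
}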
-PREHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 8: column stats, grouping sets - cardinality = 32 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 8: column stats, grouping sets - cardinality = 32 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -614,25 +664,25 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 2880 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -644,12 +694,16 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY: numRows: 80 (map-side will not do any reduction) --- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2) +PREHOOK: query: -- map-side parallelism will be 10 + +-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 +-- Case 9: column stats, NO grouping sets - cardinality = 2 explain select year from loc_orc group by year PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY: numRows: 80 (map-side will not do any reduction) --- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2.
numRows = min(80/2, 2) +POSTHOOK: query: -- map-side parallelism will be 10 + +-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 +-- Case 9: column stats, NO grouping sets - cardinality = 2 explain select year from loc_orc group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -671,25 +725,25 @@ STAGE PLANS: keys: year (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 80 Data size: 280 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 80 Data size: 280 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -701,10 +755,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7) +PREHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16 +-- Case 8: column stats, grouping sets - cardinality = 16 explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp.
numRows = min(320/2, 6*7) +POSTHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16 +-- Case 8: column stats, grouping sets - cardinality = 16 explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -726,25 +782,25 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 320 Data size: 31840 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 320 Data size: 31840 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 35 Data size: 6125 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 35 Data size: 3150 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 35 Data size: 3150 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 1440 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -756,10 +812,71 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +PREHOOK: query: -- ndvProduct becomes 0 as zip does not have column stats +-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 +-- Case 9: column stats, NO grouping sets - cardinality = 2 +explain select state,zip from loc_orc group by state,zip +PREHOOK: type: QUERY +POSTHOOK: query: -- ndvProduct becomes 0 as zip does not have column stats +-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 +-- Case 9: column stats, NO grouping sets - cardinality = 2 +explain select state,zip from loc_orc group by state,zip +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: state (type: string), zip (type: bigint) + outputColumnNames: state, zip + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + keys: state (type: string), zip (type: bigint) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 4 Data size: 344 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) + 
Statistics: Num rows: 4 Data size: 344 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 172 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 172 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 172 Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 7: NO column stats - cardinality = 16 explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 7: NO column stats - cardinality = 16 explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -811,10 +928,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24 +-- Case 7: NO column stats - cardinality = 12 explain select state,locid from loc_orc group by state,locid with rollup PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24 +-- Case 7: NO column stats - cardinality = 12 explain select state,locid from loc_orc group by state,locid with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -866,10 +985,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 8 reduce-side GBY numRows: 4 +PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 8 +-- Case 7: NO column stats - cardinality = 4 explain select state,locid from loc_orc group by state,locid grouping sets((state)) PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 8 reduce-side GBY numRows: 4 +POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 8 +-- Case 7: NO column stats - cardinality = 4 explain select state,locid from loc_orc group by state,locid grouping sets((state)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -921,10 +1042,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 16 reduce-side GBY numRows: 8 +PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 16 +-- Case 7: NO column stats - cardinality = 8 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 16 reduce-side GBY numRows: 8 +POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 16 +-- Case 7: NO column stats - 
cardinality = 8 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -976,10 +1099,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24 +-- Case 7: NO column stats - cardinality = 12 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 24 +-- Case 7: NO column stats - cardinality = 12 explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1031,10 +1156,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 7: NO column stats - cardinality = 16 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 7: NO column stats - cardinality = 16 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1086,12 +1213,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY: numRows: 80 (map-side will not do any reduction) --- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2) +PREHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 +-- Case 7: NO column stats - cardinality = 4 explain select year from loc_orc group by year PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY: numRows: 80 (map-side will not do any reduction) --- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2.
numRows = min(80/2, 2) +POSTHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 +-- Case 7: NO column stats - cardinality = 4 explain select year from loc_orc group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1113,25 +1240,25 @@ STAGE PLANS: keys: year (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 80 Data size: 7960 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 80 Data size: 7960 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 40 Data size: 3980 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 40 Data size: 3980 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 40 Data size: 3980 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1143,10 +1270,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7) +PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 7: NO column stats - cardinality = 16 explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp.
numRows = min(320/2, 6*7) +POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 32 +-- Case 7: NO column stats - cardinality = 16 explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1168,25 +1297,25 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 320 Data size: 31840 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 320 Data size: 31840 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 160 Data size: 15920 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 160 Data size: 15920 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 160 Data size: 15920 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out new file mode 100644 index 0000000..f991191 --- /dev/null +++ b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out @@ -0,0 +1,485 @@ +PREHOOK: query: drop table location +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table location +POSTHOOK: type: DROPTABLE +PREHOOK: query: -- There are different cases for Group By depending on map/reduce side, hash aggregation, +-- grouping sets and column stats. If we don't have column stats, we just assume hash +-- aggregation is disabled.
Following are the possible cases and rules for cardinality +-- estimation + +-- MAP SIDE: +-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - numRows +-- Case 2: NO column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet +-- Case 3: column stats, hash aggregation, NO grouping sets - Min(numRows / 2, ndvProduct * parallelism) +-- Case 4: column stats, hash aggregation, grouping sets - Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) +-- Case 5: column stats, NO hash aggregation, NO grouping sets - numRows +-- Case 6: column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet + +-- REDUCE SIDE: +-- Case 7: NO column stats - numRows / 2 +-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet) +-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) + +create table location (state string, country string, votes bigint) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@location +POSTHOOK: query: -- There are different cases for Group By depending on map/reduce side, hash aggregation, +-- grouping sets and column stats. If we don't have column stats, we just assume hash +-- aggregation is disabled. Following are the possible cases and rules for cardinality +-- estimation + +-- MAP SIDE: +-- Case 1: NO column stats, NO hash aggregation, NO grouping sets - numRows +-- Case 2: NO column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet +-- Case 3: column stats, hash aggregation, NO grouping sets - Min(numRows / 2, ndvProduct * parallelism) +-- Case 4: column stats, hash aggregation, grouping sets - Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) +-- Case 5: column stats, NO hash aggregation, NO grouping sets - numRows +-- Case 6: column stats, NO hash aggregation, grouping sets - numRows * sizeOfGroupingSet + +-- REDUCE SIDE: +-- Case 7: NO column stats - numRows / 2 +-- Case 8: column stats, grouping sets - Min(numRows, ndvProduct * sizeOfGroupingSet) +-- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) + +create table location (state string, country string, votes bigint) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@location +PREHOOK: query: load data local inpath "../../data/files/location.txt" overwrite into table location +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@location +POSTHOOK: query: load data local inpath "../../data/files/location.txt" overwrite into table location +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@location +PREHOOK: query: analyze table location compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@location +PREHOOK: Output: default@location +POSTHOOK: query: analyze table location compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@location +POSTHOOK: Output: default@location +PREHOOK: query: analyze table location compute statistics for columns state, country +PREHOOK: type: QUERY +PREHOOK: Input: default@location +#### A masked pattern was here #### +POSTHOOK: query: analyze table location compute statistics for columns state, country +POSTHOOK: type: QUERY +POSTHOOK: Input: default@location +#### A masked pattern was here #### +PREHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 20 +-- Case 7: NO column stats - cardinality = 
10 +explain select state, country from location group by state, country +PREHOOK: type: QUERY +POSTHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 20 +-- Case 7: NO column stats - cardinality = 10 +explain select state, country from location group by state, country +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: location + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: state (type: string), country (type: string) + outputColumnNames: state, country + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: state (type: string), country (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 80 +-- Case 7: NO column stats - cardinality = 40 +explain select state, country from location group by state, country with cube +PREHOOK: type: QUERY +POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, grouping sets - cardinality = 80 +-- Case 7: NO column stats - cardinality = 40 +explain select state, country from location group by state, country with cube +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: location + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: state (type: string), country (type: string) + outputColumnNames: state, country + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: state (type: string), country (type: string), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 80 Data size: 800 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) + Statistics: Num rows: 80 Data 
size: 800 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 40 Data size: 400 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 40 Data size: 400 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 40 Data size: 400 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- parallelism = 4 + +-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 8 +-- Case 9: column stats, NO grouping sets - cardinality = 2 +explain select state, country from location group by state, country +PREHOOK: type: QUERY +POSTHOOK: query: -- parallelism = 4 + +-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 8 +-- Case 9: column stats, NO grouping sets - cardinality = 2 +explain select state, country from location group by state, country +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: location + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), country (type: string) + outputColumnNames: state, country + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), country (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- column stats for votes is missing, so ndvProduct becomes 0 and will be set to numRows / 2 +-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 10 +-- Case 9: column stats, NO grouping sets - cardinality = 5 +explain select state, votes from location 
group by state, votes +PREHOOK: type: QUERY +POSTHOOK: query: -- column stats for votes is missing, so ndvProduct becomes 0 and will be set to numRows / 2 +-- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 10 +-- Case 9: column stats, NO grouping sets - cardinality = 5 +explain select state, votes from location group by state, votes +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: location + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: state (type: string), votes (type: bigint) + outputColumnNames: state, votes + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + keys: state (type: string), votes (type: bigint) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 10 Data size: 860 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) + Statistics: Num rows: 10 Data size: 860 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 430 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 430 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 430 Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 32 +-- Case 8: column stats, grouping sets - cardinality = 8 +explain select state, country from location group by state, country with cube +PREHOOK: type: QUERY +POSTHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 32 +-- Case 8: column stats, grouping sets - cardinality = 8 +explain select state, country from location group by state, country with cube +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: location + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), country (type: string) + outputColumnNames: state, country + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), country (type: string), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 32 Data size: 8256 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), 
_col1 (type: string), _col2 (type: string) + Statistics: Num rows: 32 Data size: 8256 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 8 Data size: 2064 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 20 +-- Case 9: column stats, NO grouping sets - cardinality = 2 +explain select state, country from location group by state, country +PREHOOK: type: QUERY +POSTHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 20 +-- Case 9: column stats, NO grouping sets - cardinality = 2 +explain select state, country from location group by state, country +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: location + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), country (type: string) + outputColumnNames: state, country + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), country (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 20 Data size: 3460 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 346 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 80 +-- Case 8: column stats, grouping sets - cardinality = 8 +explain select state, country from location group by state, country with cube
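[Editor's note: the state,zip and state,votes queries above exercise the fallback described in their comments: when a grouping column has no column statistics, ndvProduct degenerates to 0 and half of the operator's input row count is substituted. The snippet below is a hypothetical continuation of the earlier sketch showing one consistent reading of the 10-row and 5-row estimates; the helper and class names are invented.]

public final class NdvFallbackSketch {
  // If any grouping column lacks column stats, the ndv product degenerates
  // to 0 and the estimator falls back to half of the operator's input rows.
  static long effectiveNdvProduct(long ndvProduct, long inputRows) {
    return ndvProduct == 0 ? inputRows / 2 : ndvProduct;
  }

  public static void main(String[] args) {
    // "group by state, votes" over 20 rows, no stats on votes, parallelism 4:
    long mapNdv = effectiveNdvProduct(0, 20);       // 10
    long mapRows = Math.min(20 / 2, mapNdv * 4);    // Case 3 -> 10
    long redNdv = effectiveNdvProduct(0, mapRows);  // 5
    long redRows = Math.min(mapRows, redNdv);       // Case 9 -> 5
    System.out.println(mapRows + " -> " + redRows); // prints "10 -> 5"
  }
}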
+PREHOOK: type: QUERY +POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 80 +-- Case 8: column stats, grouping sets - cardinality = 8 +explain select state, country from location group by state, country with cube +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: location + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), country (type: string) + outputColumnNames: state, country + Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), country (type: string), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 80 Data size: 20640 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) + Statistics: Num rows: 80 Data size: 20640 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 8 Data size: 2064 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 8 Data size: 1384 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: drop table location +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@location +PREHOOK: Output: default@location +POSTHOOK: query: drop table location +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@location +POSTHOOK: Output: default@location diff --git a/ql/src/test/results/clientpositive/annotate_stats_part.q.out b/ql/src/test/results/clientpositive/annotate_stats_part.q.out index 6262d37..b952fa6 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_part.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_part.q.out @@ -56,11 +56,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: PARTIAL ListSink PREHOOK: query: insert overwrite table loc_orc partition(year) select * from loc_staging @@ -98,11 +98,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 5 Data size: 724 Basic stats: COMPLETE Column stats: NONE + 
Statistics: Num rows: 5 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 724 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- partition level analyze statistics for specific partition @@ -135,11 +135,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 2 Data size: 325 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 323 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 325 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 323 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- basicStatState: PARTIAL colStatState: NONE @@ -158,11 +158,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 9 Data size: 724 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 9 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 9 Data size: 724 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 9 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE @@ -181,11 +181,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 7 Data size: 399 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 7 Data size: 399 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- partition level analyze statistics for all partitions @@ -222,11 +222,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE @@ -245,11 +245,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: NONE + Statistics: Num
rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE @@ -268,11 +268,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- both partitions will be pruned @@ -293,14 +293,14 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: PARTIAL Filter Operator predicate: ((year = '2001') and (year = '__HIVE_DEFAULT_PARTITION__')) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), '__HIVE_DEFAULT_PARTITION__' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: PARTIAL ListSink PREHOOK: query: -- partition level partial column statistics @@ -322,33 +322,21 @@ POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE explain select zip from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: loc_orc - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: zip (type: bigint) - outputColumnNames: _col0 - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: zip (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL explain select state from loc_orc @@ -357,33 +345,44 @@ POSTHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL explain select state from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: loc_orc - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: PARTIAL - Select Operator - expressions: state (type: string) - outputColumnNames: _col0 - Statistics: Num 
rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL - File Output Operator - compressed: false - Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: state (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL + ListSink +PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE +explain select year from loc_orc +PREHOOK: type: QUERY +POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE +explain select year from loc_orc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: year (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 8 Data size: 1472 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL -- basicStatState: COMPLETE colStatState: PARTIAL @@ -394,33 +393,21 @@ POSTHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supp explain select state,locid from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: loc_orc - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: PARTIAL - Select Operator - expressions: state (type: string), locid (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL - File Output Operator - compressed: false - Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL + ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE explain select state,locid from loc_orc where year='2001' @@ -429,33 +416,21 @@ POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE explain select state,locid from loc_orc where year='2001' POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: loc_orc - Statistics: Num rows: 7 Data size: 399 Basic stats: COMPLETE Column stats: COMPLETE - Select 
Operator - expressions: state (type: string), locid (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 7 Data size: 630 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 7 Data size: 630 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: loc_orc + Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 7 Data size: 630 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE explain select state,locid from loc_orc where year!='2001' @@ -464,33 +439,21 @@ POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE explain select state,locid from loc_orc where year!='2001' POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: loc_orc - Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: state (type: string), locid (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: loc_orc + Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL explain select * from loc_orc @@ -508,11 +471,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- This is to test filter expression evaluation on partition column @@ -533,7 +496,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: loc_orc - Statistics: Num rows: 7 Data size: 399 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (locid > 0) (type: boolean) Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -569,10 +532,10 @@ STAGE PLANS: 
Map Operator Tree: TableScan alias: loc_orc - Statistics: Num rows: 7 Data size: 399 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (locid > 0) (type: boolean) - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: locid (type: int), year (type: string) outputColumnNames: _col0, _col1 @@ -605,17 +568,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: loc_orc - Statistics: Num rows: 7 Data size: 399 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (locid > 0) (type: boolean) - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: locid (type: int), year (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 176 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/annotate_stats_select.q.out b/ql/src/test/results/clientpositive/annotate_stats_select.q.out index 1b3b334..8ef4964 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_select.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_select.q.out @@ -152,33 +152,21 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 8 explain select bo1 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: bo1 (type: boolean) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: bo1 (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- col alias renaming -- numRows: 2 rawDataSize: 8 @@ -189,33 +177,21 @@ POSTHOOK: query: -- col alias renaming explain select i1 as int1 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - 
Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: i1 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: i1 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 174 explain select s1 from alltypes_orc @@ -224,33 +200,21 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 174 explain select s1 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: s1 (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: s1 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- column statistics for complex types unsupported and so statistics will not be updated -- numRows: 2 rawDataSize: 1514 @@ -261,33 +225,21 @@ POSTHOOK: query: -- column statistics for complex types unsupported and so stati explain select m1 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: m1 (type: map) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + 
Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: m1 (type: map) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 246 explain select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc @@ -296,33 +248,21 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 246 explain select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: bo1 (type: boolean), ti1 (type: tinyint), si1 (type: smallint), i1 (type: int), bi1 (type: bigint), f1 (type: float), d1 (type: double), s1 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 - Statistics: Num rows: 2 Data size: 246 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 246 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: bo1 (type: boolean), ti1 (type: tinyint), si1 (type: smallint), i1 (type: int), bi1 (type: bigint), f1 (type: float), d1 (type: double), s1 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 2 Data size: 246 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 0 explain select null from alltypes_orc @@ -331,33 +271,21 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 0 explain select null from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: null (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: null (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 8 explain select 11 from alltypes_orc @@ -366,33 +294,21 @@ POSTHOOK: query: -- numRows: 2 
rawDataSize: 8 explain select 11 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: 11 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 11 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 16 explain select 11L from alltypes_orc @@ -401,33 +317,21 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 16 explain select 11L from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: 11 (type: bigint) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 11 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 16 explain select 11.0 from alltypes_orc @@ -436,33 +340,21 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 16 explain select 11.0 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: 11.0 (type: double) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 11.0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 178 explain select "hello" from alltypes_orc @@ -471,99 +363,63 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 178 explain select "hello" from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: 'hello' (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 'hello' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: explain select cast("hello" as char(5)) from alltypes_orc PREHOOK: type: QUERY POSTHOOK: query: explain select cast("hello" as char(5)) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: CAST( 'hello' AS CHAR(5) (type: char(5)) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 'hello' AS CHAR(5) (type: char(5)) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: explain select cast("hello" as varchar(5)) from alltypes_orc PREHOOK: type: QUERY POSTHOOK: query: explain select cast("hello" as varchar(5)) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map 
Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: CAST( 'hello' AS varchar(5)) (type: varchar(5)) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( 'hello' AS varchar(5)) (type: varchar(5)) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 96 explain select unbase64("0xe23") from alltypes_orc @@ -572,33 +428,21 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 96 explain select unbase64("0xe23") from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: D317B6 (type: binary) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: D317B6 (type: binary) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 16 explain select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc @@ -607,33 +451,21 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 16 explain select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: 1 (type: tinyint), 20 (type: smallint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 1 (type: tinyint), 20 (type: smallint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 80 explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc @@ -642,33 +474,21 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 80 explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: 1970-12-31 15:59:58.174 (type: timestamp) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 1970-12-31 15:59:58.174 (type: timestamp) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 112 explain select cast("1970-12-31 15:59:58.174" as DATE) from alltypes_orc @@ -690,10 +510,10 @@ STAGE PLANS: Select Operator expressions: null (type: void) outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -712,33 +532,21 @@ POSTHOOK: query: -- numRows: 2 rawDataSize: 224 explain select cast("58.174" as DECIMAL) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: CAST( '58.174' AS decimal(10,0)) (type: decimal(10,0)) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: CAST( '58.174' AS decimal(10,0)) (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 2 rawDataSize: 112 explain select array(1,2,3) from alltypes_orc @@ -997,33 +805,21 @@ POSTHOOK: query: -- column statistics for complex column types will be missing. explain select *,11 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: alltypes_orc - Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: PARTIAL - Select Operator - expressions: bo1 (type: boolean), ti1 (type: tinyint), si1 (type: smallint), i1 (type: int), bi1 (type: bigint), f1 (type: float), d1 (type: double), de1 (type: decimal(10,0)), ts1 (type: timestamp), da1 (type: timestamp), s1 (type: string), vc1 (type: varchar(5)), m1 (type: map), l1 (type: array), st1 (type: struct), 11 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 - Statistics: Num rows: 2 Data size: 428 Basic stats: COMPLETE Column stats: PARTIAL - File Output Operator - compressed: false - Statistics: Num rows: 2 Data size: 428 Basic stats: COMPLETE Column stats: PARTIAL - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: alltypes_orc + Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: bo1 (type: boolean), ti1 (type: tinyint), si1 (type: smallint), i1 (type: int), bi1 (type: bigint), f1 (type: float), d1 (type: double), de1 (type: decimal(10,0)), ts1 (type: timestamp), da1 (type: timestamp), s1 (type: string), vc1 (type: varchar(5)), m1 (type: map), l1 (type: array), st1 (type: struct), 11 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 + Statistics: Num rows: 2 Data size: 428 Basic stats: COMPLETE Column stats: PARTIAL + ListSink PREHOOK: query: -- subquery selects -- inner select - numRows: 2 rawDataSize: 8 diff --git a/ql/src/test/results/clientpositive/annotate_stats_table.q.out b/ql/src/test/results/clientpositive/annotate_stats_table.q.out index e0e7021..eed5daa 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_table.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_table.q.out @@ -171,33 +171,21 @@ POSTHOOK: query: -- all selected columns have statistics explain select deptid from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column 
stats: COMPLETE - Select Operator - expressions: deptid (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: emp_orc + Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: deptid (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- column level complete statistics analyze table emp_orc compute statistics for columns lastname,deptid @@ -239,33 +227,21 @@ POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE explain select lastname from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: lastname (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 48 Data size: 4368 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 48 Data size: 4368 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: emp_orc + Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: lastname (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 48 Data size: 4368 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE explain select deptid from emp_orc @@ -274,33 +250,21 @@ POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE explain select deptid from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: deptid (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: emp_orc + Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + 
expressions: deptid (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE explain select lastname,deptid from emp_orc @@ -309,31 +273,19 @@ POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE explain select lastname,deptid from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: lastname (type: string), deptid (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 48 Data size: 4560 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 48 Data size: 4560 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: emp_orc + Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: lastname (type: string), deptid (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 48 Data size: 4560 Basic stats: COMPLETE Column stats: COMPLETE + ListSink diff --git a/ql/src/test/results/clientpositive/annotate_stats_union.q.out b/ql/src/test/results/clientpositive/annotate_stats_union.q.out index 1e8c46b..e0e1504 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_union.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_union.q.out @@ -67,33 +67,21 @@ POSTHOOK: query: -- numRows: 8 rawDataSize: 688 explain select state from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: loc_orc - Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: state (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: -- numRows: 16 rawDataSize: 1376 explain select * from (select state from loc_orc union all select state from loc_orc) tmp diff --git a/ql/src/test/results/clientpositive/authorization_delete.q.out b/ql/src/test/results/clientpositive/authorization_delete.q.out index 9aa4600..260b9a4 100644 --- 
a/ql/src/test/results/clientpositive/authorization_delete.q.out +++ b/ql/src/test/results/clientpositive/authorization_delete.q.out @@ -1,12 +1,12 @@ PREHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) -CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc +CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_auth_del POSTHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) -CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc +CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_auth_del diff --git a/ql/src/test/results/clientpositive/authorization_delete_own_table.q.out b/ql/src/test/results/clientpositive/authorization_delete_own_table.q.out index 1e0f9c8..24833ed 100644 --- a/ql/src/test/results/clientpositive/authorization_delete_own_table.q.out +++ b/ql/src/test/results/clientpositive/authorization_delete_own_table.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc +PREHOOK: query: create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@auth_noupd -POSTHOOK: query: create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc +POSTHOOK: query: create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@auth_noupd diff --git a/ql/src/test/results/clientpositive/authorization_explain.q.out b/ql/src/test/results/clientpositive/authorization_explain.q.out index e5e605b..ba73021 100644 --- a/ql/src/test/results/clientpositive/authorization_explain.q.out +++ b/ql/src/test/results/clientpositive/authorization_explain.q.out @@ -5,11 +5,11 @@ POSTHOOK: query: explain authorization select * from src join srcpart POSTHOOK: type: QUERY INPUTS: default@srcpart + default@src default@srcpart@ds=2008-04-08/hr=11 default@srcpart@ds=2008-04-08/hr=12 default@srcpart@ds=2008-04-09/hr=11 default@srcpart@ds=2008-04-09/hr=12 - default@src OUTPUTS: #### A masked pattern was here #### CURRENT_USER: diff --git a/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out b/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out new file mode 100644 index 0000000..745a8ae --- /dev/null +++ b/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out @@ -0,0 +1,78 @@ +PREHOOK: query: set role admin +PREHOOK: type: SHOW_ROLES +POSTHOOK: query: set role admin +POSTHOOK: type: SHOW_ROLES +PREHOOK: query: create role r1 +PREHOOK: type: CREATEROLE +POSTHOOK: query: create role r1 +POSTHOOK: type: CREATEROLE +PREHOOK: query: grant role r1 to user r1user +PREHOOK: type: GRANT_ROLE +POSTHOOK: query: grant role r1 to user r1user +POSTHOOK: type: GRANT_ROLE +PREHOOK: query: CREATE TABLE t1(i int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: CREATE TABLE t1(i int) +POSTHOOK: type: CREATETABLE 
+POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: -- all privileges should have been set for user + +GRANT ALL ON t1 TO ROLE r1 WITH GRANT OPTION +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@t1 +POSTHOOK: query: -- all privileges should have been set for user + +GRANT ALL ON t1 TO ROLE r1 WITH GRANT OPTION +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@t1 +PREHOOK: query: -- check if user belonging to role r1 can grant privileges to others +GRANT ALL ON t1 TO USER user3 +PREHOOK: type: GRANT_PRIVILEGE +PREHOOK: Output: default@t1 +POSTHOOK: query: -- check if user belonging to role r1 can grant privileges to others +GRANT ALL ON t1 TO USER user3 +POSTHOOK: type: GRANT_PRIVILEGE +POSTHOOK: Output: default@t1 +PREHOOK: query: set role admin +PREHOOK: type: SHOW_ROLES +POSTHOOK: query: set role admin +POSTHOOK: type: SHOW_ROLES +PREHOOK: query: -- check privileges on table +show grant on table t1 +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: -- check privileges on table +show grant on table t1 +POSTHOOK: type: SHOW_GRANT +default t1 r1 ROLE DELETE true -1 user1 +default t1 r1 ROLE INSERT true -1 user1 +default t1 r1 ROLE SELECT true -1 user1 +default t1 r1 ROLE UPDATE true -1 user1 +default t1 user1 USER DELETE true -1 hive_admin_user +default t1 user1 USER INSERT true -1 hive_admin_user +default t1 user1 USER SELECT true -1 hive_admin_user +default t1 user1 USER UPDATE true -1 hive_admin_user +default t1 user3 USER DELETE false -1 r1user +default t1 user3 USER INSERT false -1 r1user +default t1 user3 USER SELECT false -1 r1user +default t1 user3 USER UPDATE false -1 r1user +PREHOOK: query: -- check if drop role removes privileges for that role +drop role r1 +PREHOOK: type: DROPROLE +POSTHOOK: query: -- check if drop role removes privileges for that role +drop role r1 +POSTHOOK: type: DROPROLE +PREHOOK: query: show grant on table t1 +PREHOOK: type: SHOW_GRANT +POSTHOOK: query: show grant on table t1 +POSTHOOK: type: SHOW_GRANT +default t1 user1 USER DELETE true -1 hive_admin_user +default t1 user1 USER INSERT true -1 hive_admin_user +default t1 user1 USER SELECT true -1 hive_admin_user +default t1 user1 USER UPDATE true -1 hive_admin_user +default t1 user3 USER DELETE false -1 r1user +default t1 user3 USER INSERT false -1 r1user +default t1 user3 USER SELECT false -1 r1user +default t1 user3 USER UPDATE false -1 r1user diff --git a/ql/src/test/results/clientpositive/authorization_update.q.out b/ql/src/test/results/clientpositive/authorization_update.q.out index 019d363..ef65b33 100644 --- a/ql/src/test/results/clientpositive/authorization_update.q.out +++ b/ql/src/test/results/clientpositive/authorization_update.q.out @@ -1,12 +1,12 @@ PREHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) -CREATE TABLE t_auth_up(i int) clustered by (i) into 2 buckets stored as orc +CREATE TABLE t_auth_up(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_auth_up POSTHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!)
-CREATE TABLE t_auth_up(i int) clustered by (i) into 2 buckets stored as orc +CREATE TABLE t_auth_up(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_auth_up diff --git a/ql/src/test/results/clientpositive/authorization_update_own_table.q.out b/ql/src/test/results/clientpositive/authorization_update_own_table.q.out index cbf8f57..e4b5f0b 100644 --- a/ql/src/test/results/clientpositive/authorization_update_own_table.q.out +++ b/ql/src/test/results/clientpositive/authorization_update_own_table.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc +PREHOOK: query: create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@auth_noupd -POSTHOOK: query: create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc +POSTHOOK: query: create table auth_noupd(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@auth_noupd diff --git a/ql/src/test/results/clientpositive/auto_join8.q.out b/ql/src/test/results/clientpositive/auto_join8.q.out index 2e3d1c8..bf9b8b4 100644 --- a/ql/src/test/results/clientpositive/auto_join8.q.out +++ b/ql/src/test/results/clientpositive/auto_join8.q.out @@ -97,7 +97,7 @@ STAGE PLANS: predicate: _col2 is null (type: boolean) Statistics: Num rows: 15 Data size: 163 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int), _col3 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(null) (type: int), _col3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 15 Data size: 163 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out index eaf6c15..3c30a31 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out @@ -407,7 +407,7 @@ STAGE PLANS: d TableScan alias: d - Statistics: Num rows: 0 Data size: 170 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 170 Basic stats: PARTIAL Column stats: NONE GatherStats: false HashTable Sink Operator condition expressions: diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_16.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_16.q.out index 03507dd..b32ac06 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_16.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_16.q.out @@ -232,6 +232,16 @@ POSTHOOK: Input: default@bucket_small@pri=2 0 val_0 val_0 day1 1 0 val_0 val_0 day1 1 0 val_0 val_0 day1 1 +103 val_103 val_103 day1 1 +103 val_103 val_103 day1 1 +103 val_103 val_103 day1 1 +103 val_103 val_103 day1 1 +374 val_374 val_374 day1 1 +374 val_374 val_374 day1 1 +172 val_172 val_172 day1 1 +172 val_172 val_172 day1 1 +172 val_172 val_172 day1 1 +172 val_172 val_172 day1 1 169 val_169 val_169 day1 1 169 val_169 val_169 day1 1 169 val_169 val_169 day1 1 @@ -240,13 +250,3 
@@ POSTHOOK: Input: default@bucket_small@pri=2 169 val_169 val_169 day1 1 169 val_169 val_169 day1 1 169 val_169 val_169 day1 1 -374 val_374 val_374 day1 1 -374 val_374 val_374 day1 1 -172 val_172 val_172 day1 1 -172 val_172 val_172 day1 1 -172 val_172 val_172 day1 1 -172 val_172 val_172 day1 1 -103 val_103 val_103 day1 1 -103 val_103 val_103 day1 1 -103 val_103 val_103 day1 1 -103 val_103 val_103 day1 1 diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out index 30a8436..1904cc2 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out @@ -157,8 +157,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -179,8 +177,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -329,8 +325,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -351,8 +345,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -532,8 +524,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -554,8 +544,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -594,8 +582,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_small numFiles 4 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_small { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -709,8 +695,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -731,8 +715,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -771,8 +753,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_small numFiles 4 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_small { string key, string value} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -863,8 +843,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -885,8 +863,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/avro_charvarchar.q.out b/ql/src/test/results/clientpositive/avro_charvarchar.q.out new file mode 100644 index 0000000..41b5a41 --- /dev/null +++ b/ql/src/test/results/clientpositive/avro_charvarchar.q.out @@ -0,0 +1,87 @@ +PREHOOK: query: DROP TABLE avro_charvarchar_staging +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE avro_charvarchar_staging +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE avro_charvarchar +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE avro_charvarchar +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE avro_charvarchar_staging ( + cchar char(5), + cvarchar varchar(10), + m1 map, + l1 array, + st1 struct +) ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +COLLECTION ITEMS TERMINATED BY ',' +MAP KEYS TERMINATED BY ':' +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@avro_charvarchar_staging +POSTHOOK: query: CREATE TABLE avro_charvarchar_staging ( + cchar char(5), + cvarchar varchar(10), + m1 map, + l1 array, + st1 struct +) ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +COLLECTION ITEMS TERMINATED BY ',' +MAP KEYS TERMINATED BY ':' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@avro_charvarchar_staging +PREHOOK: query: CREATE TABLE avro_charvarchar ( + cchar char(5), + cvarchar varchar(10), + m1 map, + l1 array, + st1 struct +) STORED AS AVRO +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@avro_charvarchar +POSTHOOK: query: CREATE TABLE avro_charvarchar ( + cchar char(5), + cvarchar varchar(10), + m1 map, + l1 array, + st1 struct +) STORED AS AVRO +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@avro_charvarchar +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_charvarchar.txt' OVERWRITE INTO TABLE avro_charvarchar_staging +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@avro_charvarchar_staging +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/avro_charvarchar.txt' OVERWRITE INTO TABLE avro_charvarchar_staging +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@avro_charvarchar_staging +PREHOOK: query: INSERT OVERWRITE TABLE avro_charvarchar SELECT * FROM avro_charvarchar_staging +PREHOOK: type: QUERY +PREHOOK: Input: default@avro_charvarchar_staging +PREHOOK: Output: default@avro_charvarchar +POSTHOOK: query: INSERT OVERWRITE TABLE avro_charvarchar SELECT * FROM avro_charvarchar_staging +POSTHOOK: type: QUERY +POSTHOOK: Input: default@avro_charvarchar_staging +POSTHOOK: Output: default@avro_charvarchar +POSTHOOK: Lineage: avro_charvarchar.cchar SIMPLE [(avro_charvarchar_staging)avro_charvarchar_staging.FieldSchema(name:cchar, type:char(5), comment:null), ] +POSTHOOK: Lineage: avro_charvarchar.cvarchar SIMPLE 
[(avro_charvarchar_staging)avro_charvarchar_staging.FieldSchema(name:cvarchar, type:varchar(10), comment:null), ] +POSTHOOK: Lineage: avro_charvarchar.l1 SIMPLE [(avro_charvarchar_staging)avro_charvarchar_staging.FieldSchema(name:l1, type:array, comment:null), ] +POSTHOOK: Lineage: avro_charvarchar.m1 SIMPLE [(avro_charvarchar_staging)avro_charvarchar_staging.FieldSchema(name:m1, type:map, comment:null), ] +POSTHOOK: Lineage: avro_charvarchar.st1 SIMPLE [(avro_charvarchar_staging)avro_charvarchar_staging.FieldSchema(name:st1, type:struct, comment:null), ] +PREHOOK: query: SELECT * FROM avro_charvarchar +PREHOOK: type: QUERY +PREHOOK: Input: default@avro_charvarchar +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM avro_charvarchar +POSTHOOK: type: QUERY +POSTHOOK: Input: default@avro_charvarchar +#### A masked pattern was here #### +a a {"k1":"v1"} ["101","x200"] {"c1":10,"c2":"abcd"} +ab ab {"k2":"v1"} ["102","y200"] {"c1":10,"c2":"abc"} +abc abc {"k3":"v1"} ["103","200"] {"c1":10,"c2":"a "} +abcde abcdefghij {"k9":"v1"} ["109","200"] {"c1":10,"c2":" a"} diff --git a/ql/src/test/results/clientpositive/binarysortable_1.q.out b/ql/src/test/results/clientpositive/binarysortable_1.q.out index 7ff3fd4..e486466 100644 --- a/ql/src/test/results/clientpositive/binarysortable_1.q.out +++ b/ql/src/test/results/clientpositive/binarysortable_1.q.out @@ -56,12 +56,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 93 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 93 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: double) Reduce Operator Tree: Group By Operator diff --git a/ql/src/test/results/clientpositive/bucketcontext_5.q.out b/ql/src/test/results/clientpositive/bucketcontext_5.q.out index 085cce9..feb833d 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_5.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_5.q.out @@ -200,8 +200,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -222,8 +220,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -380,8 +376,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -402,8 +396,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.bucket_big numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct bucket_big { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/bucketmapjoin1.q.out 
b/ql/src/test/results/clientpositive/bucketmapjoin1.q.out index 72a9173..9a76f94 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin1.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin1.q.out @@ -601,8 +601,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -622,8 +620,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/bucketmapjoin4.q.out b/ql/src/test/results/clientpositive/bucketmapjoin4.q.out index 1210bed..2af66a2 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin4.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin4.q.out @@ -292,8 +292,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -313,8 +311,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -783,8 +779,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -804,8 +798,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out index 8181498..258a962 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out @@ -288,8 +288,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -309,8 +307,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out index c2abc1c..d99f5b4 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out @@ -343,8 +343,6 @@ STAGE PLANS: #### A 
masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -364,8 +362,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out index c87b857..40148af 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out @@ -302,8 +302,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test1 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -324,8 +322,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test1 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -501,8 +497,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test2 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -523,8 +517,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test2 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -689,8 +681,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test1 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -711,8 +701,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test1 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -880,8 +868,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test1 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -902,8 +888,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test1 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1071,8 +1055,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test1 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1093,8 +1075,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test1 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test1 { string key, 
string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1262,8 +1242,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test1 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1284,8 +1262,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test1 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test1 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1453,8 +1429,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test2 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1475,8 +1449,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test2 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1644,8 +1616,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test2 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1666,8 +1636,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test2 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test2 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1835,8 +1803,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test3 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1857,8 +1823,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.test3 numFiles 3 - numRows 0 - rawDataSize 0 serialization.ddl struct test3 { string key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/cbo_correctness.q.out b/ql/src/test/results/clientpositive/cbo_correctness.q.out new file mode 100644 index 0000000..3335d4d --- /dev/null +++ b/ql/src/test/results/clientpositive/cbo_correctness.q.out @@ -0,0 +1,18962 @@ +PREHOOK: query: drop table if exists t1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists t2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists t3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: 
Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 +POSTHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t2 +PREHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t3 +POSTHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t3 +PREHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1@dt=2014 +PREHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2@dt=2014 +PREHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t3 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t3 +PREHOOK: query: CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@part +POSTHOOK: query: CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@part +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@part +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@part +PREHOOK: query: DROP TABLE lineitem +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE lineitem +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY 
INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@lineitem +POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@lineitem +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@lineitem +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@lineitem +PREHOOK: query: create table src_cbo as select * from src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_cbo +POSTHOOK: query: create table src_cbo as select * from src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_cbo +PREHOOK: query: analyze table t1 partition (dt) compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Output: default@t1 +PREHOOK: Output: default@t1@dt=2014 +POSTHOOK: query: analyze table t1 partition (dt) compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1@dt=2014 +PREHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +PREHOOK: query: analyze table t2 partition (dt) compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Output: default@t2 +PREHOOK: Output: default@t2@dt=2014 +POSTHOOK: query: analyze table t2 partition (dt) compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2@dt=2014 +PREHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### 
+PREHOOK: query: analyze table t3 compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +PREHOOK: Output: default@t3 +POSTHOOK: query: analyze table t3 compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@t3 +PREHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +PREHOOK: query: analyze table src_cbo compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +PREHOOK: Output: default@src_cbo +POSTHOOK: query: analyze table src_cbo compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +POSTHOOK: Output: default@src_cbo +PREHOOK: query: analyze table src_cbo compute statistics for columns +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: analyze table src_cbo compute statistics for columns +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +PREHOOK: query: analyze table part compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@part +PREHOOK: Output: default@part +POSTHOOK: query: analyze table part compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +POSTHOOK: Output: default@part +PREHOOK: query: analyze table part compute statistics for columns +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: analyze table part compute statistics for columns +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +PREHOOK: query: analyze table lineitem compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +PREHOOK: Output: default@lineitem +POSTHOOK: query: analyze table lineitem compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +POSTHOOK: Output: default@lineitem +PREHOOK: query: analyze table lineitem compute statistics for columns +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: analyze table lineitem compute statistics for columns +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +PREHOOK: query: -- 1. Test Select + TS +select * from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 1. 
Test Select + TS +select * from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select * from t1 as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select * from t1 as t2 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +null NULL NULL +null NULL NULL +PREHOOK: query: -- 2. Test Select + TS + FIL +select * from t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 2. 
Test Select + TS + FIL +select * from t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from t1 where t1.c_int >= 0) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from t1 where t1.c_int >= 0) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 
+1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 
1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: 
default@t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: -- 4. Test Select + Join + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 4. Test Select + Join + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.key from t1 join t3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key from t1 join t3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 
+1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: 
default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 
+1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 
+1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 
+1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 
1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 
1 +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +PREHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 
+1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +PREHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key 
+PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 
1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float 
as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 
1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 
1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 
1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer 
join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 
+1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 
[... long run of identical result rows omitted: "1 1.0 1 1 1" ...]
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+NULL NULL null NULL NULL
+PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
+ 1 1 1 1
[... long run of identical result rows omitted: "1 1 1 1" ...]
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+PREHOOK: query: -- 5. Test Select + Join + FIL + TS
+select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5. Test Select + Join + FIL + TS
+select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
[... long run of identical result rows omitted: "1 1" ...]
+PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
[... long run of identical result rows omitted: "1 1" ...]
+PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
[... long run of identical result rows omitted: "1 1" ...]
+PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
[... long run of identical result rows omitted: "1 1" ...]
+PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows omitted: "1 1.0 1 1 1" ...]
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows omitted: "1 1 1 1.0 1" ...]
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows omitted: "1 1 1 1.0 1" ...]
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows omitted: "1 1 1 1.0 1" ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows omitted: "1 1 1 1.0 1" ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
[... long run of identical result rows omitted: "1 1 1 1.0 1" ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_float
as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 
1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, 
c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 
1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + 
t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 
1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 
1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 
1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +PREHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having +select * from t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 6. 
Test Select + TS + Join + Fil + GB + GB Having +select * from t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +NULL +1 +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +null NULL NULL + 1 4 2 + 1 4 2 +1 4 12 +1 4 2 +PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +NULL NULL +2 5.0 +12 5.0 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc +PREHOOK: type: QUERY +PREHOOK: Input: 
default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 
0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 2 +1 12 +PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from t1 group by c_int limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 7. 
Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from t1 group by c_int limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +NULL +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +null NULL NULL +PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +NULL NULL +PREHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +null NULL +null NULL +1 1 +1 1 +1 1 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on 
t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: -- 8. Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 8. 
Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +20 18 18 1.0 1 1 +PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2 0 NULL NULL NULL NULL 3 6 +18 18 18 1.0 1 1 2 36 +PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +20 1 18 1.0 1 1 +PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2 0 NULL NULL NULL NULL 3 6 +18 1 18 1.0 1 1 2 36 +PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 20 1 18 +PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, 
min(distinct c_int) as f from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 20 1 1 +PREHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2 1.0 1 +2 1.0 1 +12 1.0 1 +2 1.0 1 +0 NULL null +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: -- 9. Test Windowing Functions +select count(c_int) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 9. 
Test Windowing Functions +select count(c_int) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 18.0 1 1 1 1 1 0.0 1 NULL +18 18.0 1 1 2 1 1 0.0 1 NULL +18 18.0 1 1 3 1 1 0.0 1 NULL +18 18.0 1 1 4 1 1 0.0 1 NULL +18 18.0 1 1 5 1 1 0.0 1 1.0 +18 18.0 1 1 6 1 1 0.0 1 1.0 +18 18.0 1 1 7 1 1 0.0 1 1.0 +18 18.0 1 1 8 1 1 0.0 1 1.0 +18 18.0 1 1 9 1 1 0.0 1 1.0 +18 18.0 1 1 10 1 1 0.0 1 1.0 +18 18.0 1 1 11 1 1 0.0 1 1.0 +18 18.0 1 1 12 1 1 0.0 1 1.0 +18 18.0 1 1 13 1 1 0.0 1 1.0 +18 18.0 1 1 14 1 1 0.0 1 1.0 +18 18.0 1 1 15 1 1 0.0 1 1.0 +18 18.0 1 1 16 1 1 0.0 1 1.0 +18 18.0 1 1 17 1 1 0.0 1 1.0 +18 18.0 1 1 18 1 1 0.0 1 1.0 +18 18.0 1 1 19 1 1 0.0 1 1.0 +18 18.0 1 1 20 1 1 0.0 1 1.0 +PREHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 18.0 1 1 1 1 1 0.0 1 NULL +18 18.0 1 1 2 1 1 0.0 1 NULL +18 18.0 1 1 3 1 1 0.0 1 NULL +18 18.0 1 1 4 1 1 0.0 1 NULL +18 18.0 1 1 5 1 1 0.0 1 1.0 +18 18.0 1 1 6 1 1 0.0 1 1.0 +18 18.0 1 1 7 1 1 0.0 1 1.0 +18 18.0 1 1 8 1 1 0.0 1 1.0 +18 18.0 1 1 9 1 1 0.0 1 1.0 +18 18.0 1 1 10 1 1 0.0 1 1.0 +18 18.0 1 1 11 1 1 0.0 1 1.0 +18 18.0 1 1 12 1 1 0.0 1 1.0 +18 18.0 1 1 13 1 1 0.0 1 1.0 +18 18.0 1 1 14 1 1 0.0 1 1.0 +18 18.0 1 1 15 1 1 0.0 1 1.0 +18 18.0 1 1 16 1 1 0.0 1 1.0 +18 18.0 1 1 17 1 1 0.0 1 1.0 +18 18.0 1 1 18 1 1 0.0 1 1.0 +18 18.0 1 1 19 1 1 0.0 1 1.0 +18 18.0 1 1 20 1 1 0.0 1 1.0 +PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: 
select 1+sum(c_int) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select 1+sum(c_int) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +PREHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +36 +PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 3.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 4.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 5.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 6.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 7.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 8.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 9.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 10.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 11.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 12.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value 
range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: -- 10. Test views +create view v1 as select c_int, value, c_boolean, dt from t1 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v1 +POSTHOOK: query: -- 10. 
Test views +create view v1 as select c_int, value, c_boolean, dt from t1 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v1 +PREHOOK: query: create view v2 as select c_int, value from t2 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t2 +PREHOOK: Output: database:default +PREHOOK: Output: default@v2 +POSTHOOK: query: create view v2 as select c_int, value from t2 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@t2 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v2 +PREHOOK: query: select value from v1 where c_boolean=false +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select value from v1 where c_boolean=false +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +1 +1 +PREHOOK: query: select max(c_int) from v1 group by (c_boolean) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select max(c_int) from v1 group by (c_boolean) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +NULL +1 +1 +PREHOOK: query: select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +234 +PREHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v2 +#### A masked pattern was here #### +POSTHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v2 +#### A masked pattern was here #### +234 +PREHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +160 +PREHOOK: query: create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t1 +PREHOOK: Input: default@v1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v3 +POSTHOOK: query: create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean +POSTHOOK: type: CREATEVIEW +POSTHOOK: 
Input: default@t1 +POSTHOOK: Input: default@v1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v3 +PREHOOK: query: select count(val) from v3 where val != '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v3 +#### A masked pattern was here #### +POSTHOOK: query: select count(val) from v3 where val != '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v3 +#### A masked pattern was here #### +96 +PREHOOK: query: with q1 as ( select key from t1 where key = '1') +select count(*) from q1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select key from t1 where key = '1') +select count(*) from q1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +12 +PREHOOK: query: with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +2 +PREHOOK: query: create view v4 as +with q1 as ( select key,c_int from t1 where key = '1') +select * from q1 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v4 +POSTHOOK: query: create view v4 as +with q1 as ( select key,c_int from t1 where key = '1') +select * from q1 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v4 +PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +2 +PREHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v4 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: 
Input: default@v4 +#### A masked pattern was here #### +31104 +PREHOOK: query: drop view v1 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v1 +PREHOOK: Output: default@v1 +POSTHOOK: query: drop view v1 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v1 +POSTHOOK: Output: default@v1 +PREHOOK: query: drop view v2 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v2 +PREHOOK: Output: default@v2 +POSTHOOK: query: drop view v2 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v2 +POSTHOOK: Output: default@v2 +PREHOOK: query: drop view v3 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v3 +PREHOOK: Output: default@v3 +POSTHOOK: query: drop view v3 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v3 +POSTHOOK: Output: default@v3 +PREHOOK: query: drop view v4 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v4 +PREHOOK: Output: default@v4 +POSTHOOK: query: drop view v4 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v4 +POSTHOOK: Output: default@v4 +PREHOOK: query: -- 11. Union All +select * from t1 union all select * from t2 order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 11. Union All +select * from t1 union all select * from t2 order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +3 +3 +3 +PREHOOK: query: select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all 
select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +PREHOOK: query: -- 12. 
SemiJoin +select t1.c_int from t1 left semi join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 12. SemiJoin +select t1.c_int from t1 left semi join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +NULL +NULL +PREHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +PREHOOK: query: select * from (select t3.c_int, t1.c, b from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t3.c_int, t1.c, b from 
(select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +PREHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select c_int, b, t1.c from 
(select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, 
p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 2 + 1 2 +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 2 + 1 2 +1 2 +1 12 +PREHOOK: query: -- 13. null expr in select list +select null from t3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: -- 13. null expr in select list +select null from t3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: -- 14. unary operator +select key from t1 where c_int = -6 or c_int = +6 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 14. unary operator +select key from t1 where c_int = -6 or c_int = +6 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +PREHOOK: query: -- 15. 
query referencing only partition columns +select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 15. query referencing only partition columns +select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +400 +PREHOOK: query: -- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +15 val_15 +15 val_15 +150 val_150 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +18 val_18 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +2 val_2 +PREHOOK: query: -- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#1 almond antique chartreuse lavender yellow 34 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#4 almond 
antique gainsboro frosted violet 10 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +almond antique blue firebrick mint 31 +almond antique burnished rose metallic 2 +almond antique burnished rose metallic 2 +almond antique chartreuse khaki white 17 +almond antique chartreuse lavender yellow 34 +almond antique forest lavender goldenrod 14 +almond antique gainsboro frosted violet 10 +almond antique medium spring khaki 6 +almond antique metallic orange dim 19 +almond antique misty red olive 1 +almond antique olive coral navajo 45 +almond antique salmon chartreuse burlywood 6 +almond antique sky peru orange 2 +almond antique violet chocolate turquoise 14 +almond antique violet mint lemon 39 +almond antique violet turquoise frosted 40 +almond aquamarine burnished black steel 28 +almond aquamarine dodger light gainsboro 46 +almond aquamarine floral ivory bisque 27 +almond aquamarine midnight light salmon 2 +almond aquamarine pink moccasin thistle 42 +almond aquamarine rose maroon antique 25 +almond aquamarine sandy cyan gainsboro 18 +almond aquamarine yellow dodger mint 7 +almond azure aquamarine papaya violet 12 +almond azure blanched chiffon midnight 23 +PREHOOK: query: -- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#1 almond antique chartreuse lavender yellow 34 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#4 almond antique gainsboro frosted violet 10 +Manufacturer#5 almond antique medium spring khaki 6 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#1 almond antique salmon 
chartreuse burlywood 6 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +450 1 +7068 1 +21636 1 +22630 1 +59694 1 +61931 1 +85951 1 +88035 1 +88362 1 +106170 1 +119477 1 +119767 1 +123076 1 +139636 1 +175839 1 +182052 1 +PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) + order by b.p_mfgr +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) + order by b.p_mfgr +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 +Manufacturer#2 1690.68 +PREHOOK: query: -- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) + order by b.p_mfgr +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) + order by b.p_mfgr +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 +Manufacturer#2 1690.68 +PREHOOK: query: -- 17. 
SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 17. SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +4297 1798 +108570 8571 +PREHOOK: query: -- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +80 val_80 1 +96 val_96 1 +92 val_92 1 +9 val_9 1 +87 val_87 1 +86 val_86 1 +85 val_85 1 +82 val_82 1 +84 val_84 2 +95 val_95 2 +83 val_83 2 +98 val_98 2 +97 val_97 2 +90 val_90 3 
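An aside for readers of this patch (commentary, not part of the diff): the plan notes embedded in the test above — "Stage 1: b semijoin sq1:src_cbo" — hint at how Hive evaluates these IN subqueries: the subquery side is joined back to the outer query with a LEFT SEMI JOIN, so an outer row is emitted at most once no matter how many subquery rows match it. A minimal sketch of that equivalence, reusing the src_cbo table from the tests above; this illustrates the general rewrite, not the literal plan the optimizer emits:

-- IN-subquery form, as exercised by the test:
select * from src_cbo
where src_cbo.key in (select key from src_cbo s1 where s1.key > '9');

-- Roughly equivalent semi-join form: LEFT SEMI JOIN keeps each outer
-- row at most once when any match exists, matching IN's multiplicity
-- even if the subquery side contains duplicate keys.
select b.*
from src_cbo b
left semi join (select key from src_cbo where key > '9') s1
  on b.key = s1.key;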
+PREHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2.0 +Manufacturer#3 almond antique misty red olive 1.0 +Manufacturer#5 almond antique sky peru orange 2.0 +Manufacturer#2 almond aquamarine midnight light salmon 2.0 +Manufacturer#4 almond aquamarine yellow dodger mint 7.0 +PREHOOK: query: -- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +15 val_15 +15 val_15 +150 val_150 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +18 val_18 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +2 val_2 +PREHOOK: query: -- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +POSTHOOK: type: QUERY 
+POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +10 val_10 +100 val_100 +103 val_103 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +119 val_119 +12 val_12 +PREHOOK: query: -- 19. SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@src_cbo +PREHOOK: Output: database:default +PREHOOK: Output: default@cv1 +POSTHOOK: query: -- 19. SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@src_cbo +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cv1 +PREHOOK: query: select * from cv1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cv1 +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: select * from cv1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cv1 +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 3 +92 1 +95 2 +96 1 +97 2 +98 2 +PREHOOK: query: -- 17. get stats with empty partition list +select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 17. 
get stats with empty partition list +select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/column_access_stats.q.out b/ql/src/test/results/clientpositive/column_access_stats.q.out index b7da2b8..103e6e2 100644 --- a/ql/src/test/results/clientpositive/column_access_stats.q.out +++ b/ql/src/test/results/clientpositive/column_access_stats.q.out @@ -83,33 +83,21 @@ PREHOOK: query: -- More complicated select queries EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1 PREHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: t1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + ListSink PREHOOK: query: SELECT key FROM (SELECT key, val FROM T1) subq1 PREHOOK: type: QUERY @@ -127,33 +115,21 @@ Columns:key PREHOOK: query: EXPLAIN SELECT k FROM (SELECT key as k, val as v FROM T1) subq1 PREHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: t1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + ListSink PREHOOK: query: SELECT k FROM (SELECT key as k, val as v FROM T1) subq1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/columnstats_tbllvl.q.out b/ql/src/test/results/clientpositive/columnstats_tbllvl.q.out index d6fd01f..b7987f4 100644 --- a/ql/src/test/results/clientpositive/columnstats_tbllvl.q.out +++ b/ql/src/test/results/clientpositive/columnstats_tbllvl.q.out 
@@ -146,8 +146,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.uservisits_web_text_none numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite} serialization.format | serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -167,8 +165,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.uservisits_web_text_none numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite} serialization.format | serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/combine2.q.out b/ql/src/test/results/clientpositive/combine2.q.out index 831612d..921dd90 100644 --- a/ql/src/test/results/clientpositive/combine2.q.out +++ b/ql/src/test/results/clientpositive/combine2.q.out @@ -94,33 +94,21 @@ POSTHOOK: query: explain select key, value from combine2 where value is not null POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: combine2 - Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: combine2 + Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: select key, value from combine2 where value is not null PREHOOK: type: QUERY @@ -197,18 +185,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: combine2 - Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator - Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 12 Data size: 14 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col0 (type: bigint) auto parallelism: false @@ -275,7 +263,7 @@ STAGE PLANS: columns.types string 
#### A masked pattern was here #### name default.combine2 - numFiles 1 + numFiles 3 numRows 3 partition_columns value partition_columns.types string @@ -410,7 +398,7 @@ STAGE PLANS: columns.types string #### A masked pattern was here #### name default.combine2 - numFiles 1 + numFiles 3 numRows 3 partition_columns value partition_columns.types string @@ -590,17 +578,17 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -664,22 +652,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string) outputColumnNames: ds - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) keys: ds (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 2000 Data size: 384000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 384000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -687,14 +675,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out b/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out new file mode 100644 index 0000000..317a642 
--- /dev/null +++ b/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out @@ -0,0 +1,298 @@ +Warning: Shuffle Join JOIN[4][tables = [a, b]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: explain extended + select * from (select a.key as ak, a.value as av, b.key as bk, b.value as bv from src a join src1 b where a.key = '429' ) c +PREHOOK: type: QUERY +POSTHOOK: query: explain extended + select * from (select a.key as ak, a.value as av, b.key as bk, b.value as bv from src a join src1 b where a.key = '429' ) c +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_SUBQUERY + TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + src + a + TOK_TABREF + TOK_TABNAME + src1 + b + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + . + TOK_TABLE_OR_COL + a + key + ak + TOK_SELEXPR + . + TOK_TABLE_OR_COL + a + value + av + TOK_SELEXPR + . + TOK_TABLE_OR_COL + b + key + bk + TOK_SELEXPR + . + TOK_TABLE_OR_COL + b + value + bv + TOK_WHERE + = + . + TOK_TABLE_OR_COL + a + key + '429' + c + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_ALLCOLREF + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: (key = '429') (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + tag: 0 + value expressions: value (type: string) + auto parallelism: false + TableScan + alias: b + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Reduce Output Operator + sort order: + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE + tag: 1 + value expressions: key (type: string), value (type: string) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: src + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments defaultdefault + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 500 + rawDataSize 5312 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments defaultdefault + columns.types string:string +#### A masked pattern was here #### + name default.src + numFiles 1 + numRows 500 + rawDataSize 5312 + serialization.ddl struct src { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern 
was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src + name: default.src +#### A masked pattern was here #### + Partition + base file name: src1 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments defaultdefault + columns.types string:string +#### A masked pattern was here #### + name default.src1 + numFiles 1 + numRows 25 + rawDataSize 191 + serialization.ddl struct src1 { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 216 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key,value + columns.comments defaultdefault + columns.types string:string +#### A masked pattern was here #### + name default.src1 + numFiles 1 + numRows 25 + rawDataSize 191 + serialization.ddl struct src1 { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 216 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src1 + name: default.src1 + Truncated Path -> Alias: + /src [c:a] + /src1 [c:b] + Needs Tagging: true + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col1} + 1 {VALUE._col0} {VALUE._col1} + outputColumnNames: _col1, _col5, _col6 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '429' (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1,_col2,_col3 + columns.types string:string:string:string + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +Warning: Shuffle Join JOIN[4][tables = [a, b]] in Stage 'Stage-1:MAPRED' is a cross product +PREHOOK: query: select * from (select a.key as ak, a.value as av, b.key as bk, b.value as bv from src a join src1 b where a.key = '429' ) c +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select a.key as ak, a.value as av, b.key as bk, b.value as bv from src a join src1 b where a.key = '429' ) c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 
+#### A masked pattern was here #### +429 val_429 +429 val_429 +429 val_429 +429 val_429 406 val_406 +429 val_429 146 val_146 +429 val_429 213 val_213 +429 val_429 128 +429 val_429 66 val_66 +429 val_429 369 +429 val_429 224 +429 val_429 273 val_273 +429 val_429 150 val_150 +429 val_429 401 val_401 +429 val_429 val_193 +429 val_429 val_265 +429 val_429 val_484 +429 val_429 98 val_98 +429 val_429 278 val_278 +429 val_429 255 val_255 +429 val_429 val_409 +429 val_429 val_165 +429 val_429 val_27 +429 val_429 311 val_311 +429 val_429 +429 val_429 238 val_238 +429 val_429 +429 val_429 +429 val_429 +429 val_429 406 val_406 +429 val_429 146 val_146 +429 val_429 213 val_213 +429 val_429 128 +429 val_429 66 val_66 +429 val_429 369 +429 val_429 224 +429 val_429 273 val_273 +429 val_429 150 val_150 +429 val_429 401 val_401 +429 val_429 val_193 +429 val_429 val_265 +429 val_429 val_484 +429 val_429 98 val_98 +429 val_429 278 val_278 +429 val_429 255 val_255 +429 val_429 val_409 +429 val_429 val_165 +429 val_429 val_27 +429 val_429 311 val_311 +429 val_429 +429 val_429 238 val_238 diff --git a/ql/src/test/results/clientpositive/constprog_dp.q.out b/ql/src/test/results/clientpositive/constprog_dp.q.out index e876467..98af017 100644 --- a/ql/src/test/results/clientpositive/constprog_dp.q.out +++ b/ql/src/test/results/clientpositive/constprog_dp.q.out @@ -16,8 +16,13 @@ insert overwrite table dest partition (ds) select key, value, ds where ds='2008- POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 Stage-2 depends on stages: Stage-0 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 STAGE PLANS: Stage: Stage-1 @@ -30,23 +35,23 @@ STAGE PLANS: expressions: key (type: string), value (type: string), ds (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Reduce Operator Tree: - Extract - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.dest + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### Stage: Stage-0 Move Operator @@ -63,6 +68,36 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + PREHOOK: query: from srcpart insert overwrite table dest partition (ds) select key, value, ds where ds='2008-04-08' PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/create_func1.q.out b/ql/src/test/results/clientpositive/create_func1.q.out index 798f77f..30c16c7 100644 --- a/ql/src/test/results/clientpositive/create_func1.q.out +++ b/ql/src/test/results/clientpositive/create_func1.q.out @@ -9,6 +9,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true false +PREHOOK: query: describe function extended qtest_get_java_boolean +PREHOOK: type: DESCFUNCTION +POSTHOOK: query: describe function extended qtest_get_java_boolean +POSTHOOK: type: DESCFUNCTION +qtest_get_java_boolean(str) - GenericUDF to return native Java's boolean type +Synonyms: default.qtest_get_java_boolean PREHOOK: query: create database mydb PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:mydb @@ -28,6 +34,15 @@ PREHOOK: type: SHOWFUNCTIONS POSTHOOK: query: show functions mydb.func1 POSTHOOK: type: SHOWFUNCTIONS mydb.func1 +PREHOOK: query: describe function extended mydb.func1 +PREHOOK: type: DESCFUNCTION +POSTHOOK: query: describe function extended mydb.func1 +POSTHOOK: type: DESCFUNCTION +mydb.func1(str) - Returns str with all characters changed to uppercase +Synonyms: upper, ucase +Example: + > SELECT mydb.func1('Facebook') FROM src LIMIT 1; + 'FACEBOOK' PREHOOK: query: select mydb.func1('abc') from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/decimal_udf.q.out b/ql/src/test/results/clientpositive/decimal_udf.q.out index c5c2031..8fda14c 100644 --- a/ql/src/test/results/clientpositive/decimal_udf.q.out +++ b/ql/src/test/results/clientpositive/decimal_udf.q.out @@ -633,6 +633,61 @@ NULL 1.00000000000000000000 NULL NULL +PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: decimal_udf + Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key * value) > 0) (type: boolean) + Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: decimal(20,10)), value (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, value FROM DECIMAL_UDF where key * value > 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf +#### A masked 
pattern was here #### +100 100 +10 10 +1 1 +200 200 +20 20 +2 2 +1.0 1 +2 2 +3.14 3 +-1.12 -1 +-1.12 -1 +-1.122 -11 +1.12 1 +1.122 1 +124.00 124 +125.2 125 +-1255.49 -1255 +3.14 3 +3.14 3 +3.140 4 +1.0000000000 1 +-1234567890.1234567890 -1234567890 +1234567890.1234567800 1234567890 PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF diff --git a/ql/src/test/results/clientpositive/decimal_udf2.q.out b/ql/src/test/results/clientpositive/decimal_udf2.q.out new file mode 100644 index 0000000..d8511d6 --- /dev/null +++ b/ql/src/test/results/clientpositive/decimal_udf2.q.out @@ -0,0 +1,148 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_UDF2 (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_UDF2 +POSTHOOK: query: CREATE TABLE DECIMAL_UDF2 (key decimal(20,10), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_UDF2 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_udf2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_UDF2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_udf2 +PREHOOK: query: EXPLAIN +SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf2 + Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key = 10) (type: boolean) + Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: acos(key) (type: double), asin(key) (type: double), atan(key) (type: double), cos(key) (type: double), sin(key) (type: double), tan(key) (type: double), radians(key) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), 
radians(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +NaN NaN 1.4711276743037347 -0.8390715290764524 -0.5440211108893698 0.6483608274590866 0.17453292519943295 +PREHOOK: query: EXPLAIN +SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: decimal_udf2 + Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key = 10) (type: boolean) + Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: exp(key) (type: double), ln(key) (type: double), log(key) (type: double), log(key, key) (type: double), log(key, value) (type: double), log(value, key) (type: double), log10(key) (type: double), sqrt(key) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT + exp(key), ln(key), + log(key), log(key, key), log(key, value), log(value, key), + log10(key), sqrt(key) +FROM DECIMAL_UDF2 WHERE key = 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_udf2 +#### A masked pattern was here #### +22026.465794806718 2.302585092994046 2.302585092994046 1.0 1.0 1.0 1.0 3.1622776601683795 +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_udf2 +PREHOOK: Output: default@decimal_udf2 +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_UDF2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_udf2 +POSTHOOK: Output: default@decimal_udf2 diff --git a/ql/src/test/results/clientpositive/delete_all_non_partitioned.q.out b/ql/src/test/results/clientpositive/delete_all_non_partitioned.q.out index 0d428ca..38ce075 100644 --- a/ql/src/test/results/clientpositive/delete_all_non_partitioned.q.out +++ b/ql/src/test/results/clientpositive/delete_all_non_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_danp -POSTHOOK: query: create table acid_danp(a int, b 
varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_danp diff --git a/ql/src/test/results/clientpositive/delete_all_partitioned.q.out b/ql/src/test/results/clientpositive/delete_all_partitioned.q.out index 4486323..c5149b2 100644 --- a/ql/src/test/results/clientpositive/delete_all_partitioned.q.out +++ b/ql/src/test/results/clientpositive/delete_all_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dap -POSTHOOK: query: create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dap @@ -84,3 +84,5 @@ POSTHOOK: Input: default@acid_dap POSTHOOK: Input: default@acid_dap@ds=today POSTHOOK: Input: default@acid_dap@ds=tomorrow #### A masked pattern was here #### +-1071480828 aw724t8c5558x2xneC624 today +-1072076362 2uLyD28144vklju213J1mr today diff --git a/ql/src/test/results/clientpositive/delete_tmp_table.q.out b/ql/src/test/results/clientpositive/delete_tmp_table.q.out index ca568b3..4dc7344 100644 --- a/ql/src/test/results/clientpositive/delete_tmp_table.q.out +++ b/ql/src/test/results/clientpositive/delete_tmp_table.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dtt -POSTHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dtt diff --git a/ql/src/test/results/clientpositive/delete_where_no_match.q.out b/ql/src/test/results/clientpositive/delete_where_no_match.q.out index 1450ee6..cb2adc6 100644 --- a/ql/src/test/results/clientpositive/delete_where_no_match.q.out +++ b/ql/src/test/results/clientpositive/delete_where_no_match.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dwnm -POSTHOOK: query: create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc 
+POSTHOOK: query: create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dwnm diff --git a/ql/src/test/results/clientpositive/delete_where_non_partitioned.q.out b/ql/src/test/results/clientpositive/delete_where_non_partitioned.q.out index d465e8e..1bdb1e6 100644 --- a/ql/src/test/results/clientpositive/delete_where_non_partitioned.q.out +++ b/ql/src/test/results/clientpositive/delete_where_non_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dwnp -POSTHOOK: query: create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dwnp diff --git a/ql/src/test/results/clientpositive/delete_where_partitioned.q.out b/ql/src/test/results/clientpositive/delete_where_partitioned.q.out index 9f8581b..fc2e369 100644 --- a/ql/src/test/results/clientpositive/delete_where_partitioned.q.out +++ b/ql/src/test/results/clientpositive/delete_where_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dwp -POSTHOOK: query: create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dwp diff --git a/ql/src/test/results/clientpositive/delete_whole_partition.q.out b/ql/src/test/results/clientpositive/delete_whole_partition.q.out index a2408eb..043daf4 100644 --- a/ql/src/test/results/clientpositive/delete_whole_partition.q.out +++ b/ql/src/test/results/clientpositive/delete_whole_partition.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dwhp -POSTHOOK: query: create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: 
Output: database:default POSTHOOK: Output: default@acid_dwhp diff --git a/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out b/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out index 7745be7..2b26245 100644 --- a/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out +++ b/ql/src/test/results/clientpositive/display_colstats_tbllvl.q.out @@ -162,8 +162,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.uservisits_web_text_none numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite} serialization.format | serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -183,8 +181,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.uservisits_web_text_none numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite} serialization.format | serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/drop_table_purge.q.out b/ql/src/test/results/clientpositive/drop_table_purge.q.out new file mode 100644 index 0000000..14f53b6 --- /dev/null +++ b/ql/src/test/results/clientpositive/drop_table_purge.q.out @@ -0,0 +1,16 @@ +PREHOOK: query: CREATE TABLE IF NOT EXISTS temp(col STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@temp +POSTHOOK: query: CREATE TABLE IF NOT EXISTS temp(col STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@temp +PREHOOK: query: DROP TABLE temp PURGE +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@temp +PREHOOK: Output: default@temp +POSTHOOK: query: DROP TABLE temp PURGE +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@temp +POSTHOOK: Output: default@temp diff --git a/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out b/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out index f2bebe4..a209ae9 100644 --- a/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out +++ b/ql/src/test/results/clientpositive/dynamic_partition_skip_default.q.out @@ -74,48 +74,14 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: dynamic_part_table - Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: intcol (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0 - columns.types string - escape.delim \ - hive.serialization.extend.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### + Stage: Stage-0 + Fetch Operator + limit: -1 + Partition Description: Partition - base file name: partcol2=1 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -159,14 +125,16 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.dynamic_part_table name: default.dynamic_part_table - Truncated Path -> Alias: - /dynamic_part_table/partcol1=1/partcol2=1 [dynamic_part_table] - - Stage: Stage-0 - Fetch Operator - limit: -1 Processor Tree: - ListSink + TableScan + alias: dynamic_part_table + Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: intcol (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: explain extended select intcol from dynamic_part_table where partcol1='1' and partcol2='1' PREHOOK: type: QUERY @@ -200,48 +168,14 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: dynamic_part_table - Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: intcol (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0 - columns.types string - escape.delim \ - hive.serialization.extend.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### + Stage: Stage-0 + Fetch Operator + limit: -1 + Partition Description: Partition - base file name: partcol2=1 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -285,14 +219,16 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.dynamic_part_table name: default.dynamic_part_table - Truncated Path -> Alias: - /dynamic_part_table/partcol1=1/partcol2=1 [dynamic_part_table] - - Stage: Stage-0 - Fetch Operator - limit: -1 Processor Tree: - ListSink + TableScan + alias: dynamic_part_table + Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: intcol (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 1 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: explain extended select intcol from dynamic_part_table 
where (partcol1='1' and partcol2='1')or (partcol1='1' and partcol2='__HIVE_DEFAULT_PARTITION__') PREHOOK: type: QUERY @@ -336,48 +272,14 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: dynamic_part_table - Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: intcol (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0 - columns.types string - escape.delim \ - hive.serialization.extend.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### + Stage: Stage-0 + Fetch Operator + limit: -1 + Partition Description: Partition - base file name: partcol2=1 input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -421,9 +323,7 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.dynamic_part_table name: default.dynamic_part_table -#### A masked pattern was here #### Partition - base file name: partcol2=__HIVE_DEFAULT_PARTITION__ input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat partition values: @@ -467,13 +367,14 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.dynamic_part_table name: default.dynamic_part_table - Truncated Path -> Alias: - /dynamic_part_table/partcol1=1/partcol2=1 [dynamic_part_table] - /dynamic_part_table/partcol1=1/partcol2=__HIVE_DEFAULT_PARTITION__ [dynamic_part_table] - - Stage: Stage-0 - Fetch Operator - limit: -1 Processor Tree: - ListSink + TableScan + alias: dynamic_part_table + Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: intcol (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 2 Basic stats: COMPLETE Column stats: NONE + ListSink diff --git a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out index e0b9bd1..4dcbb0f 100644 --- a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out @@ -342,11 +342,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string) sort order: ++ Map-reduce 
partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Execution mode: vectorized Reduce Operator Tree: Extract @@ -399,11 +399,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Execution mode: vectorized Reduce Operator Tree: Extract @@ -691,11 +691,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string) sort order: ++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Execution mode: vectorized Reduce Operator Tree: Extract @@ -748,11 +748,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Execution mode: vectorized Reduce Operator Tree: Extract @@ -878,8 +878,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 16 - rawDataSize 320 + numRows 32 + rawDataSize 640 totalSize 1348 #### A masked pattern was here #### @@ -922,9 +922,9 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 3 - rawDataSize 60 - totalSize 1034 + numRows 6 + rawDataSize 120 + totalSize 1050 #### A masked pattern was here #### # Storage Information @@ -966,8 +966,8 @@ Protect Mode: None 
Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 7 - rawDataSize 140 + numRows 14 + rawDataSize 280 totalSize 1166 #### A masked pattern was here #### @@ -1010,9 +1010,9 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 3 - rawDataSize 60 - totalSize 1040 + numRows 6 + rawDataSize 120 + totalSize 1050 #### A masked pattern was here #### # Storage Information @@ -1053,9 +1053,9 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 16 - rawDataSize 320 - totalSize 4332 + numRows 32 + rawDataSize 640 + totalSize 4340 #### A masked pattern was here #### # Storage Information @@ -1096,8 +1096,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 3 - rawDataSize 60 + numRows 6 + rawDataSize 120 totalSize 2094 #### A masked pattern was here #### @@ -1139,9 +1139,9 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 16 - rawDataSize 320 - totalSize 4318 + numRows 32 + rawDataSize 640 + totalSize 4326 #### A masked pattern was here #### # Storage Information @@ -1182,8 +1182,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 3 - rawDataSize 60 + numRows 6 + rawDataSize 120 totalSize 2094 #### A masked pattern was here #### @@ -1408,6 +1408,254 @@ STAGE PLANS: Stage: Stage-3 Stats-Aggr Operator +PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27 +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: over1k_orc + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: int) + sort order: + + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float) + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col0 is null or (_col0 = 27)) (type: boolean) + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint) + outputColumnNames: _col0, 
_col1, _col2, _col3, _col4 + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col4 (type: tinyint) + sort order: + + Map-reduce partition columns: _col4 (type: tinyint) + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Reduce Operator Tree: + Extract + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: over1k_orc + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (t is null or (t = 27)) (type: boolean) + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + outputColumnNames: si, i, b, f, t + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + sort order: +++++ + Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), 
KEY._col3 (type: float), KEY._col4 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator +explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t +PREHOOK: type: QUERY +POSTHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator +explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: over1k_orc + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (t is null or (t = 27)) (type: boolean) + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + outputColumnNames: si, i, b, f, t + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + sort order: +++++ + Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: 
NONE + Select Operator + expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col4 (type: tinyint) + sort order: + + Map-reduce partition columns: _col4 (type: tinyint) + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Reduce Operator Tree: + Extract + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + + Stage: Stage-3 + Stats-Aggr Operator + PREHOOK: query: insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by i PREHOOK: type: QUERY PREHOOK: Input: default@over1k_orc @@ -1815,11 +2063,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Execution mode: vectorized Reduce Operator Tree: Extract @@ -2017,31 +2265,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over1k_part_buck_sort2_orc - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic 
stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2235,31 +2483,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over1k_part_buck_sort2_orc - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out index 941aa9e..f4b1013 100644 --- a/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out +++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization.q.out @@ -275,11 +275,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int) + key expressions: _col4 (type: tinyint), 
'_bucket_number' (type: string) sort order: ++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Reduce Operator Tree: Extract Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE @@ -331,11 +331,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Reduce Operator Tree: Extract Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE @@ -598,11 +598,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string) sort order: ++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Reduce Operator Tree: Extract Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE @@ -654,11 +654,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Reduce Operator Tree: Extract Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE @@ -783,8 +783,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 16 - rawDataSize 415 + numRows 32 + rawDataSize 830 totalSize 862 #### A masked pattern was here #### 
@@ -827,8 +827,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 3 - rawDataSize 78 + numRows 6 + rawDataSize 156 totalSize 162 #### A masked pattern was here #### @@ -871,8 +871,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 7 - rawDataSize 181 + numRows 14 + rawDataSize 362 totalSize 376 #### A masked pattern was here #### @@ -915,8 +915,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 3 - rawDataSize 78 + numRows 6 + rawDataSize 156 totalSize 162 #### A masked pattern was here #### @@ -958,8 +958,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 16 - rawDataSize 415 + numRows 32 + rawDataSize 830 totalSize 862 #### A masked pattern was here #### @@ -1001,8 +1001,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 3 - rawDataSize 78 + numRows 6 + rawDataSize 156 totalSize 162 #### A masked pattern was here #### @@ -1044,8 +1044,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 16 - rawDataSize 415 + numRows 32 + rawDataSize 830 totalSize 862 #### A masked pattern was here #### @@ -1087,8 +1087,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 3 - rawDataSize 78 + numRows 6 + rawDataSize 156 totalSize 162 #### A masked pattern was here #### @@ -1311,6 +1311,251 @@ STAGE PLANS: Stage: Stage-3 Stats-Aggr Operator +PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27 +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: over1k + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: int) + sort order: + + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float) + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col0 is null or (_col0 = 27)) (type: boolean) + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: 
float), _col0 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col4 (type: tinyint) + sort order: + + Map-reduce partition columns: _col4 (type: tinyint) + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Reduce Operator Tree: + Extract + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: over1k + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (t is null or (t = 27)) (type: boolean) + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + outputColumnNames: si, i, b, f, t + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + sort order: +++++ + Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 
(type: float), KEY._col4 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t +PREHOOK: type: QUERY +POSTHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: over1k + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (t is null or (t = 27)) (type: boolean) + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + outputColumnNames: si, i, b, f, t + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + sort order: +++++ + Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: smallint), 
_col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col4 (type: tinyint) + sort order: + + Map-reduce partition columns: _col4 (type: tinyint) + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Reduce Operator Tree: + Extract + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-3 + Stats-Aggr Operator + PREHOOK: query: insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i PREHOOK: type: QUERY PREHOOK: Input: default@over1k @@ -1717,11 +1962,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Reduce Operator Tree: Extract Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out new file mode 100644 index 0000000..9b57bbb --- /dev/null +++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out @@ -0,0 +1,1782 @@ +PREHOOK: query: drop table ss +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table ss +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table ss_orc +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table ss_orc +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table ss_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table ss_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: 
drop table ss_part_orc +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table ss_part_orc +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table ss ( +ss_sold_date_sk int, +ss_net_paid_inc_tax float, +ss_net_profit float) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ss +POSTHOOK: query: create table ss ( +ss_sold_date_sk int, +ss_net_paid_inc_tax float, +ss_net_profit float) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ss +PREHOOK: query: create table ss_part ( +ss_net_paid_inc_tax float, +ss_net_profit float) +partitioned by (ss_sold_date_sk int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ss_part +POSTHOOK: query: create table ss_part ( +ss_net_paid_inc_tax float, +ss_net_profit float) +partitioned by (ss_sold_date_sk int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ss_part +PREHOOK: query: load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@ss +POSTHOOK: query: load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@ss +PREHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: ss + Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + outputColumnNames: ss_sold_date_sk, ss_net_paid_inc_tax, ss_net_profit + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: float), _col2 (type: float) + sort order: +++ + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 (type: float) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: 
COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: float), _col2 (type: float), _col0 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss_part +POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 151 + totalSize 162 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: 
+ serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +2.1 -2026.3 2452617 +2.99 -11.32 2452617 +85.8 25.61 2452617 +552.96 -1363.84 2452617 +565.92 196.48 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +7412.83 2071.68 2452617 +10022.63 3952.8 2452617 +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 186 + totalSize 199 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +0.15 -241.22 2452638 +150.39 -162.12 2452638 +156.67 -4626.56 2452638 +181.03 -207.24 2452638 +267.01 -3266.36 2452638 +317.87 -3775.38 2452638 +1327.08 57.97 2452638 +1413.19 178.08 2452638 +1524.33 494.37 2452638 +1971.35 -488.25 2452638 +4133.98 -775.72 2452638 +4329.49 -4000.51 2452638 +10171.1 660.48 2452638 +PREHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: ss + Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + 
Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_net_paid_inc_tax (type: float), ss_net_profit (type: float), ss_sold_date_sk (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: int) + sort order: + + Map-reduce partition columns: _col2 (type: int) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: int) + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: float), VALUE._col1 (type: float), VALUE._col2 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss_part +POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None 
+#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 151 + totalSize 162 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +10022.63 3952.8 2452617 +2.99 -11.32 2452617 +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +565.92 196.48 2452617 +85.8 25.61 2452617 +7412.83 2071.68 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +552.96 -1363.84 2452617 +2.1 -2026.3 2452617 +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 186 + totalSize 199 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +4329.49 -4000.51 2452638 +1413.19 178.08 2452638 +150.39 -162.12 2452638 +1524.33 494.37 2452638 +0.15 -241.22 2452638 +267.01 -3266.36 2452638 +181.03 -207.24 2452638 +1971.35 -488.25 2452638 +1327.08 57.97 2452638 +156.67 -4626.56 2452638 +317.87 -3775.38 2452638 +10171.1 660.48 2452638 +4133.98 -775.72 2452638 +PREHOOK: query: -- SORT DYNAMIC PARTITION DISABLED + +explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: -- SORT DYNAMIC PARTITION DISABLED + +explain insert overwrite 
table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: ss + Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + outputColumnNames: ss_sold_date_sk, ss_net_paid_inc_tax, ss_net_profit + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: float), _col2 (type: float) + sort order: +++ + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 (type: float) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: float), _col2 (type: float), _col0 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss_part +POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss +POSTHOOK: Output: 
default@ss_part@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 151 + totalSize 162 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +2.1 -2026.3 2452617 +2.99 -11.32 2452617 +85.8 25.61 2452617 +552.96 -1363.84 2452617 +565.92 196.48 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +7412.83 2071.68 2452617 +10022.63 3952.8 2452617 +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 186 + totalSize 199 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: 
No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +0.15 -241.22 2452638 +150.39 -162.12 2452638 +156.67 -4626.56 2452638 +181.03 -207.24 2452638 +267.01 -3266.36 2452638 +317.87 -3775.38 2452638 +1327.08 57.97 2452638 +1413.19 178.08 2452638 +1524.33 494.37 2452638 +1971.35 -488.25 2452638 +4133.98 -775.72 2452638 +4329.49 -4000.51 2452638 +10171.1 660.48 2452638 +PREHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: ss + Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_net_paid_inc_tax (type: float), ss_net_profit (type: float), ss_sold_date_sk (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col2 (type: int) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: int) + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: float), VALUE._col1 (type: float), VALUE._col2 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and 
ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss_part +POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 151 + totalSize 162 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +565.92 196.48 2452617 +85.8 25.61 2452617 +7412.83 2071.68 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +552.96 -1363.84 2452617 +2.1 -2026.3 2452617 +10022.63 3952.8 2452617 +2.99 -11.32 2452617 +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: ss_part +#### A masked 
pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 186 + totalSize 199 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +4329.49 -4000.51 2452638 +1413.19 178.08 2452638 +150.39 -162.12 2452638 +1524.33 494.37 2452638 +0.15 -241.22 2452638 +267.01 -3266.36 2452638 +181.03 -207.24 2452638 +1971.35 -488.25 2452638 +1327.08 57.97 2452638 +156.67 -4626.56 2452638 +317.87 -3775.38 2452638 +10171.1 660.48 2452638 +4133.98 -775.72 2452638 +PREHOOK: query: -- VECTORIZATION IS ENABLED + +create table ss_orc ( +ss_sold_date_sk int, +ss_net_paid_inc_tax float, +ss_net_profit float) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ss_orc +POSTHOOK: query: -- VECTORIZATION IS ENABLED + +create table ss_orc ( +ss_sold_date_sk int, +ss_net_paid_inc_tax float, +ss_net_profit float) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ss_orc +PREHOOK: query: create table ss_part_orc ( +ss_net_paid_inc_tax float, +ss_net_profit float) +partitioned by (ss_sold_date_sk int) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ss_part_orc +POSTHOOK: query: create table ss_part_orc ( +ss_net_paid_inc_tax float, +ss_net_profit float) +partitioned by (ss_sold_date_sk int) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ss_part_orc +PREHOOK: query: insert overwrite table ss_orc select * from ss +PREHOOK: type: QUERY +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss_orc +POSTHOOK: query: insert overwrite table ss_orc select * from ss +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss_orc +POSTHOOK: Lineage: ss_orc.ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_orc.ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_orc.ss_sold_date_sk SIMPLE [(ss)ss.FieldSchema(name:ss_sold_date_sk, type:int, comment:null), ] +PREHOOK: query: drop table ss +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss +POSTHOOK: query: drop table ss +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss +PREHOOK: query: drop table ss_part +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ss_part +PREHOOK: Output: default@ss_part +POSTHOOK: query: drop table ss_part +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ss_part +POSTHOOK: Output: default@ss_part +PREHOOK: query: explain insert overwrite table ss_part_orc 
partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: ss_orc + Statistics: Num rows: 24 Data size: 288 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + outputColumnNames: ss_sold_date_sk, ss_net_paid_inc_tax, ss_net_profit + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: float), _col2 (type: float) + sort order: +++ + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 (type: float) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: float), _col2 (type: float), _col0 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.ss_part_orc + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.ss_part_orc + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_orc +PREHOOK: Output: default@ss_part_orc +POSTHOOK: query: 
insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_orc +POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part_orc +POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part_orc +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part_orc +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 88 + totalSize 417 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part_orc +PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part_orc +POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +2.1 -2026.3 2452617 +2.99 -11.32 2452617 +85.8 25.61 2452617 +552.96 -1363.84 2452617 +565.92 196.48 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +7412.83 2071.68 2452617 +10022.63 3952.8 2452617 +PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part_orc +POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part_orc +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: 
ss_part_orc +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 104 + totalSize 440 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part_orc +PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part_orc +POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +0.15 -241.22 2452638 +150.39 -162.12 2452638 +156.67 -4626.56 2452638 +181.03 -207.24 2452638 +267.01 -3266.36 2452638 +317.87 -3775.38 2452638 +1327.08 57.97 2452638 +1413.19 178.08 2452638 +1524.33 494.37 2452638 +1971.35 -488.25 2452638 +4133.98 -775.72 2452638 +4329.49 -4000.51 2452638 +10171.1 660.48 2452638 +PREHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: ss_orc + Statistics: Num rows: 24 Data size: 288 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_net_paid_inc_tax (type: float), ss_net_profit (type: float), ss_sold_date_sk (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col2 (type: int) + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: int) + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: float), VALUE._col1 (type: float), VALUE._col2 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.ss_part_orc + + 
Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.ss_part_orc + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_orc +PREHOOK: Output: default@ss_part_orc +POSTHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_orc +POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part_orc +POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part_orc +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part_orc +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 88 + totalSize 417 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part_orc +PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part_orc +POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +565.92 196.48 2452617 +85.8 25.61 2452617 +7412.83 2071.68 2452617 +879.07 -2185.76 2452617 +1765.07 
-4648.8 2452617 +552.96 -1363.84 2452617 +2.1 -2026.3 2452617 +10022.63 3952.8 2452617 +2.99 -11.32 2452617 +PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part_orc +POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part_orc +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: ss_part_orc +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 104 + totalSize 440 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part_orc +PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part_orc +POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +4329.49 -4000.51 2452638 +1413.19 178.08 2452638 +150.39 -162.12 2452638 +1524.33 494.37 2452638 +0.15 -241.22 2452638 +267.01 -3266.36 2452638 +181.03 -207.24 2452638 +1971.35 -488.25 2452638 +1327.08 57.97 2452638 +156.67 -4626.56 2452638 +317.87 -3775.38 2452638 +10171.1 660.48 2452638 +4133.98 -775.72 2452638 +PREHOOK: query: drop table ss_orc +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ss_orc +PREHOOK: Output: default@ss_orc +POSTHOOK: query: drop table ss_orc +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ss_orc +POSTHOOK: Output: default@ss_orc +PREHOOK: query: drop table ss_part_orc +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ss_part_orc +PREHOOK: Output: default@ss_part_orc +POSTHOOK: query: drop table ss_part_orc +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ss_part_orc +POSTHOOK: Output: default@ss_part_orc +PREHOOK: query: drop table if exists hive13_dp1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists hive13_dp1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table if not exists hive13_dp1 ( + k1 int, + k2 int +) +PARTITIONED BY(`day` string) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@hive13_dp1 +POSTHOOK: query: create table if not exists hive13_dp1 ( + k1 int, + k2 int +) +PARTITIONED BY(`day` string) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@hive13_dp1 +PREHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is 
a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(value) + keys: 'day' (type: string), key (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), _col0 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.hive13_dp1 + + Stage: Stage-0 + Move Operator + tables: + partition: + day + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.hive13_dp1 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@hive13_dp1 +POSTHOOK: query: insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@hive13_dp1@day=day +POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from hive13_dp1 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@hive13_dp1 +PREHOOK: Input: default@hive13_dp1@day=day +#### A masked pattern was here #### +POSTHOOK: query: select * from hive13_dp1 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@hive13_dp1 +POSTHOOK: Input: default@hive13_dp1@day=day +#### A masked pattern was here #### +0 3 day +10 1 day +100 2 day +103 2 day +104 2 day +PREHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by 
"day", key +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(value) + keys: 'day' (type: string), key (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), _col0 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.hive13_dp1 + + Stage: Stage-0 + Move Operator + tables: + partition: + day + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.hive13_dp1 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@hive13_dp1 +POSTHOOK: query: insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@hive13_dp1@day=day +POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from hive13_dp1 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@hive13_dp1 +PREHOOK: Input: default@hive13_dp1@day=day +#### A masked pattern was here #### +POSTHOOK: query: select * from hive13_dp1 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@hive13_dp1 +POSTHOOK: Input: default@hive13_dp1@day=day +#### A masked 
+0	3	day
+10	1	day
+100	2	day
+103	2	day
+104	2	day
+PREHOOK: query: drop table hive13_dp1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@hive13_dp1
+PREHOOK: Output: default@hive13_dp1
+POSTHOOK: query: drop table hive13_dp1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@hive13_dp1
+POSTHOOK: Output: default@hive13_dp1
diff --git a/ql/src/test/results/clientpositive/explain_dependency.q.out b/ql/src/test/results/clientpositive/explain_dependency.q.out
index cb98d54..b9a8ed8 100644
--- a/ql/src/test/results/clientpositive/explain_dependency.q.out
+++ b/ql/src/test/results/clientpositive/explain_dependency.q.out
@@ -88,7 +88,7 @@ POSTHOOK: query: EXPLAIN DEPENDENCY
 SELECT key, value FROM src
 UNION ALL
 SELECT key, value FROM srcpart WHERE ds IS NOT NULL
 ) S1
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"},{"tablename":"default@src","tabletype":"MANAGED_TABLE"}]}
+{"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}]}
 PREHOOK: query: EXPLAIN DEPENDENCY
 SELECT S1.key, S2.value FROM src S1 JOIN srcpart S2 ON S1.key = S2.key WHERE ds IS NOT NULL
 PREHOOK: type: QUERY
@@ -112,12 +112,12 @@ PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V3
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V3
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@v3","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v3]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v3]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}]}
+{"input_partitions":[{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@v3","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v3]"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v3]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}]}
 PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V4
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V4
 POSTHOOK: type: QUERY
-{"input_partitions":[{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@v4","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v4, default@v1]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}]} +{"input_partitions":[{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionParents":"[default@v2]","partitionName":"default@srcpart@ds=2008-04-09/hr=12"}],"input_tables":[{"tablename":"default@v4","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v4, default@v1]"},{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}]} PREHOOK: query: -- The table should show up in the explain dependency even if none -- of the partitions are selected. CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' diff --git a/ql/src/test/results/clientpositive/explain_logical.q.out b/ql/src/test/results/clientpositive/explain_logical.q.out index a25d094..5a9cc2a 100644 --- a/ql/src/test/results/clientpositive/explain_logical.q.out +++ b/ql/src/test/results/clientpositive/explain_logical.q.out @@ -453,13 +453,7 @@ v1:src expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - File Output Operator (FS_3) - compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + ListSink (OP_4) PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V2 PREHOOK: type: QUERY @@ -490,13 +484,7 @@ v2:srcpart expressions: ds (type: string), key (type: string), value (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - File Output Operator (FS_4) - compressed: false - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + ListSink (OP_6) PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V3 PREHOOK: type: QUERY @@ -731,13 +719,7 @@ v5:srcpart expressions: key (type: string), value (type: string), '10' (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 
-      File Output Operator (FS_4)
-        compressed: false
-        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-        table:
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+      ListSink (OP_6)
 
 PREHOOK: query: EXPLAIN LOGICAL
 SELECT s1.key, s1.cnt, s2.value FROM (SELECT key, count(value) as cnt FROM src GROUP BY key) s1 JOIN src s2 ON (s1.key = s2.key) ORDER BY s1.key
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
index ef63e74..29cfefa 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out
@@ -108,48 +108,14 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc_1d
-            Statistics: Num rows: 6 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
-            GatherStats: false
-            Select Operator
-              expressions: state (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0
-                      columns.types string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -170,7 +136,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 342
+              totalSize 343
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -192,9 +158,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -215,7 +179,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 364
+              totalSize 361
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -237,15 +201,16 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-      Truncated Path -> Alias:
-        /loc_orc_1d/year=2000 [loc_orc_1d]
-        /loc_orc_1d/year=2001 [loc_orc_1d]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc_1d
+          Statistics: Num rows: 6 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
 
 PREHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL
 -- basicStatState: COMPLETE colStatState: PARTIAL
@@ -276,48 +241,14 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc_1d
-            Statistics: Num rows: 6 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
-            GatherStats: false
-            Select Operator
-              expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0,_col1
-                      columns.types string:int
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
           Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -338,7 +269,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 342
+              totalSize 343
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -360,9 +291,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -383,7 +312,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 364
+              totalSize 361
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -405,15 +334,16 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-      Truncated Path -> Alias:
-        /loc_orc_1d/year=2000 [loc_orc_1d]
-        /loc_orc_1d/year=2001 [loc_orc_1d]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc_1d
+          Statistics: Num rows: 6 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
 
 PREHOOK: query: create table if not exists loc_orc_2d (
   state string,
@@ -510,48 +440,14 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc_2d
-            Statistics: Num rows: 6 Data size: 532 Basic stats: COMPLETE Column stats: COMPLETE
-            GatherStats: false
-            Select Operator
-              expressions: state (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0
-                      columns.types string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
          Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -573,7 +469,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 260
+              totalSize 258
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -595,9 +491,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -641,9 +535,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -665,7 +557,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 247
+              totalSize 245
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -687,9 +579,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -711,7 +601,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 247
+              totalSize 245
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -733,17 +623,16 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-      Truncated Path -> Alias:
-        /loc_orc_2d/zip=94086/year=2000 [loc_orc_2d]
-        /loc_orc_2d/zip=94086/year=2001 [loc_orc_2d]
-        /loc_orc_2d/zip=94087/year=2000 [loc_orc_2d]
-        /loc_orc_2d/zip=94087/year=2001 [loc_orc_2d]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc_2d
+          Statistics: Num rows: 6 Data size: 532 Basic stats: COMPLETE Column stats: COMPLETE
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
 
 PREHOOK: query: explain extended select state,locid from loc_orc_2d
 PREHOOK: type: QUERY
@@ -770,48 +659,14 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc_2d
-            Statistics: Num rows: 6 Data size: 532 Basic stats: COMPLETE Column stats: COMPLETE
-            GatherStats: false
-            Select Operator
-              expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0,_col1
-                      columns.types string:int
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
          Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -833,7 +688,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 260
+              totalSize 258
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -855,9 +710,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -901,9 +754,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -925,7 +776,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 247
+              totalSize 245
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -947,9 +798,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -971,7 +820,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 247
+              totalSize 245
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -993,15 +842,14 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-      Truncated Path -> Alias:
-        /loc_orc_2d/zip=94086/year=2000 [loc_orc_2d]
-        /loc_orc_2d/zip=94086/year=2001 [loc_orc_2d]
-        /loc_orc_2d/zip=94087/year=2000 [loc_orc_2d]
-        /loc_orc_2d/zip=94087/year=2001 [loc_orc_2d]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc_2d
+          Statistics: Num rows: 6 Data size: 532 Basic stats: COMPLETE Column stats: COMPLETE
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
index cbe210b..db5c77c 100644
--- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
+++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out
@@ -125,48 +125,14 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc_1d
-            Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL
-            GatherStats: false
-            Select Operator
-              expressions: state (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 20 Data size: 1780 Basic stats: COMPLETE Column stats: PARTIAL
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 20 Data size: 1780 Basic stats: COMPLETE Column stats: PARTIAL
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0
-                      columns.types string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
          Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -187,7 +153,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 342
+              totalSize 343
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -209,9 +175,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -232,7 +196,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 364
+              totalSize 362
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -254,9 +218,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2002
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -277,7 +239,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 383
+              totalSize 386
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -299,9 +261,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2003
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -322,7 +282,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 390
+              totalSize 388
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -344,17 +304,16 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-      Truncated Path -> Alias:
-        /loc_orc_1d/year=2000 [loc_orc_1d]
-        /loc_orc_1d/year=2001 [loc_orc_1d]
-        /loc_orc_1d/year=2002 [loc_orc_1d]
-        /loc_orc_1d/year=2003 [loc_orc_1d]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc_1d
+          Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 20 Data size: 1780 Basic stats: COMPLETE Column stats: PARTIAL
+            ListSink
 
 PREHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL
 -- basicStatState: COMPLETE colStatState: PARTIAL
@@ -385,48 +344,14 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc_1d
-            Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL
-            GatherStats: false
-            Select Operator
-              expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 20 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 20 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0,_col1
-                      columns.types string:int
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
          Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -447,7 +372,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 342
+              totalSize 343
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -469,9 +394,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -492,7 +415,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 364
+              totalSize 362
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -514,9 +437,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2002
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -537,7 +458,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 383
+              totalSize 386
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -559,9 +480,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2003
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -582,7 +501,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 390
+              totalSize 388
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -604,17 +523,16 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-      Truncated Path -> Alias:
-        /loc_orc_1d/year=2000 [loc_orc_1d]
-        /loc_orc_1d/year=2001 [loc_orc_1d]
-        /loc_orc_1d/year=2002 [loc_orc_1d]
-        /loc_orc_1d/year=2003 [loc_orc_1d]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc_1d
+          Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 20 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL
+            ListSink
 
 PREHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state
 PREHOOK: type: QUERY
@@ -658,48 +576,14 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc_1d
-            Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: COMPLETE
-            GatherStats: false
-            Select Operator
-              expressions: state (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0
-                      columns.types string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
          Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -720,7 +604,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 342
+              totalSize 343
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -742,9 +626,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -765,7 +647,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 364
+              totalSize 362
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -787,9 +669,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2002
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -810,7 +690,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 383
+              totalSize 386
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -832,9 +712,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2003
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -855,7 +733,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 390
+              totalSize 388
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -877,17 +755,16 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-      Truncated Path -> Alias:
-        /loc_orc_1d/year=2000 [loc_orc_1d]
-        /loc_orc_1d/year=2001 [loc_orc_1d]
-        /loc_orc_1d/year=2002 [loc_orc_1d]
-        /loc_orc_1d/year=2003 [loc_orc_1d]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc_1d
+          Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: COMPLETE
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE
+            ListSink
 
 PREHOOK: query: explain extended select state,locid from loc_orc_1d
 PREHOOK: type: QUERY
@@ -914,48 +791,14 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc_1d
-            Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL
-            GatherStats: false
-            Select Operator
-              expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: PARTIAL
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: PARTIAL
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0,_col1
-                      columns.types string:int
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
          Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -976,7 +819,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 342
+              totalSize 343
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -998,9 +841,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1021,7 +862,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 364
+              totalSize 362
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1043,9 +884,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2002
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1066,7 +905,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 383
+              totalSize 386
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1088,9 +927,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2003
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1111,7 +948,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 390
+              totalSize 388
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1133,17 +970,16 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_1d
             name: default.loc_orc_1d
-      Truncated Path -> Alias:
-        /loc_orc_1d/year=2000 [loc_orc_1d]
-        /loc_orc_1d/year=2001 [loc_orc_1d]
-        /loc_orc_1d/year=2002 [loc_orc_1d]
-        /loc_orc_1d/year=2003 [loc_orc_1d]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc_1d
+          Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string), locid (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: PARTIAL
+            ListSink
 
 PREHOOK: query: create table if not exists loc_orc_2d (
   state string,
@@ -1241,48 +1077,14 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc_2d
-            Statistics: Num rows: 20 Data size: 1788 Basic stats: COMPLETE Column stats: PARTIAL
-            GatherStats: false
-            Select Operator
-              expressions: state (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 20 Data size: 1760 Basic stats: COMPLETE Column stats: PARTIAL
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 20 Data size: 1760 Basic stats: COMPLETE Column stats: PARTIAL
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0
-                      columns.types string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
          Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1304,7 +1106,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 264
+              totalSize 262
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1326,9 +1128,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2002
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1350,7 +1150,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 278
+              totalSize 276
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1372,9 +1172,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2003
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1396,7 +1194,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 280
+              totalSize 277
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1418,9 +1216,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1442,7 +1238,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 260
+              totalSize 258
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1464,9 +1260,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1488,7 +1282,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 257
+              totalSize 255
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1510,9 +1304,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2002
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1534,7 +1326,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 269
+              totalSize 265
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1556,9 +1348,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2003
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1580,7 +1370,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 278
+              totalSize 274
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1602,9 +1392,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1626,7 +1414,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 247
+              totalSize 245
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1648,9 +1436,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1672,7 +1458,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 247
+              totalSize 245
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1694,9 +1480,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2002
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1740,9 +1524,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2003
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1764,7 +1546,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 271
+              totalSize 270
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1786,24 +1568,16 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-      Truncated Path -> Alias:
-        /loc_orc_2d/zip=43201/year=2001 [loc_orc_2d]
-        /loc_orc_2d/zip=43201/year=2002 [loc_orc_2d]
-        /loc_orc_2d/zip=43201/year=2003 [loc_orc_2d]
-        /loc_orc_2d/zip=94086/year=2000 [loc_orc_2d]
-        /loc_orc_2d/zip=94086/year=2001 [loc_orc_2d]
-        /loc_orc_2d/zip=94086/year=2002 [loc_orc_2d]
-        /loc_orc_2d/zip=94086/year=2003 [loc_orc_2d]
-        /loc_orc_2d/zip=94087/year=2000 [loc_orc_2d]
-        /loc_orc_2d/zip=94087/year=2001 [loc_orc_2d]
-        /loc_orc_2d/zip=94087/year=2002 [loc_orc_2d]
-        /loc_orc_2d/zip=94087/year=2003 [loc_orc_2d]
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: loc_orc_2d
+          Statistics: Num rows: 20 Data size: 1788 Basic stats: COMPLETE Column stats: PARTIAL
+          GatherStats: false
+          Select Operator
+            expressions: state (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 20 Data size: 1760 Basic stats: COMPLETE Column stats: PARTIAL
+            ListSink
 
 PREHOOK: query: explain extended select state,locid from loc_orc_2d
 PREHOOK: type: QUERY
@@ -1830,48 +1604,14 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: loc_orc_2d
-            Statistics: Num rows: 20 Data size: 1788 Basic stats: COMPLETE Column stats: PARTIAL
-            GatherStats: false
-            Select Operator
-              expressions: state (type: string), locid (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 20 Data size: 1840 Basic stats: COMPLETE Column stats: PARTIAL
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 20 Data size: 1840 Basic stats: COMPLETE Column stats: PARTIAL
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0,_col1
-                      columns.types string:int
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Partition Description:
          Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1893,7 +1633,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 264
+              totalSize 262
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1915,9 +1655,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2002
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1939,7 +1677,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 278
+              totalSize 276
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -1961,9 +1699,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2003
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -1985,7 +1721,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 280
+              totalSize 277
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2007,9 +1743,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -2031,7 +1765,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 260
+              totalSize 258
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2053,9 +1787,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -2077,7 +1809,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 257
+              totalSize 255
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2099,9 +1831,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2002
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -2123,7 +1853,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 269
+              totalSize 265
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2145,9 +1875,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2003
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -2169,7 +1897,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 278
+              totalSize 274
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2191,9 +1919,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2000
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -2215,7 +1941,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 247
+              totalSize 245
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2237,9 +1963,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2001
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -2261,7 +1985,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 247
+              totalSize 245
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2283,9 +2007,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2002
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -2329,9 +2051,7 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-#### A masked pattern was here ####
           Partition
-            base file name: year=2003
             input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
             output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
             partition values:
@@ -2353,7 +2073,7 @@ STAGE PLANS:
               serialization.ddl struct loc_orc_2d { string state, i32 locid}
               serialization.format 1
               serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
-              totalSize 271
+              totalSize 270
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
@@ -2375,22 +2095,14 @@ STAGE PLANS:
             serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
               name: default.loc_orc_2d
             name: default.loc_orc_2d
-      Truncated Path -> Alias:
/loc_orc_2d/zip=43201/year=2001 [loc_orc_2d] - /loc_orc_2d/zip=43201/year=2002 [loc_orc_2d] - /loc_orc_2d/zip=43201/year=2003 [loc_orc_2d] - /loc_orc_2d/zip=94086/year=2000 [loc_orc_2d] - /loc_orc_2d/zip=94086/year=2001 [loc_orc_2d] - /loc_orc_2d/zip=94086/year=2002 [loc_orc_2d] - /loc_orc_2d/zip=94086/year=2003 [loc_orc_2d] - /loc_orc_2d/zip=94087/year=2000 [loc_orc_2d] - /loc_orc_2d/zip=94087/year=2001 [loc_orc_2d] - /loc_orc_2d/zip=94087/year=2002 [loc_orc_2d] - /loc_orc_2d/zip=94087/year=2003 [loc_orc_2d] - - Stage: Stage-0 - Fetch Operator - limit: -1 Processor Tree: - ListSink + TableScan + alias: loc_orc_2d + Statistics: Num rows: 20 Data size: 1788 Basic stats: COMPLETE Column stats: PARTIAL + GatherStats: false + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 20 Data size: 1840 Basic stats: COMPLETE Column stats: PARTIAL + ListSink diff --git a/ql/src/test/results/clientpositive/filter_numeric.q.out b/ql/src/test/results/clientpositive/filter_numeric.q.out index ae52ba0..b6b8339 100644 --- a/ql/src/test/results/clientpositive/filter_numeric.q.out +++ b/ql/src/test/results/clientpositive/filter_numeric.q.out @@ -33,18 +33,24 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select key, value, hr from partint where hr < 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: partint + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: (hr < 11) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), hr (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + ListSink PREHOOK: query: select key, value, hr from partint where hr < 11 PREHOOK: type: QUERY @@ -60,33 +66,21 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select key, value, hr from partint where hr <= 12 and hr > 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: partint - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string), hr (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: partint + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), hr (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: select key, value, hr from 
partint where hr <= 12 and hr > 11 PREHOOK: type: QUERY @@ -604,33 +598,21 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select key, value, hr from partint where hr between 11 and 12 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: partint - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string), hr (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: partint + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), hr (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: select key, value, hr from partint where hr between 11 and 12 PREHOOK: type: QUERY @@ -1650,33 +1632,21 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select key, value, hr from partint where hr not between 12 and 14 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: partint - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string), hr (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: partint + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), hr (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: select key, value, hr from partint where hr not between 12 and 14 PREHOOK: type: QUERY @@ -2194,33 +2164,21 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select key, value, hr from partint where hr < 13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: partint - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value 
(type: string), hr (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: partint + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), hr (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: select key, value, hr from partint where hr < 13 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/groupby4_map.q.out b/ql/src/test/results/clientpositive/groupby4_map.q.out index bbd63ad..89a0778 100644 --- a/ql/src/test/results/clientpositive/groupby4_map.q.out +++ b/ql/src/test/results/clientpositive/groupby4_map.q.out @@ -44,10 +44,10 @@ STAGE PLANS: Select Operator expressions: UDFToInteger(_col0) (type: int) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/groupby4_map_skew.q.out b/ql/src/test/results/clientpositive/groupby4_map_skew.q.out index 0c15ea6..fc9e7d1 100644 --- a/ql/src/test/results/clientpositive/groupby4_map_skew.q.out +++ b/ql/src/test/results/clientpositive/groupby4_map_skew.q.out @@ -44,10 +44,10 @@ STAGE PLANS: Select Operator expressions: UDFToInteger(_col0) (type: int) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/groupby_cube1.q.out b/ql/src/test/results/clientpositive/groupby_cube1.q.out index 0f840d6..7b5d70a 100644 --- a/ql/src/test/results/clientpositive/groupby_cube1.q.out +++ b/ql/src/test/results/clientpositive/groupby_cube1.q.out @@ -44,12 +44,12 @@ STAGE PLANS: keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition 
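Note on the filter_numeric.q.out updates above: every changed plan follows one pattern. A simple SELECT whose predicate touches only the partition column used to compile into a Map Reduce stage feeding a fetch stage; it now compiles into a single Fetch Operator with the TableScan/Select tree inlined under Processor Tree. A minimal sketch of the kind of query affected, assuming a partint-like partitioned table; the hive.fetch.task.conversion setting named here is an assumption about the feature being exercised and is not part of this patch:

  -- Hypothetical repro, not taken from this patch.
  CREATE TABLE partint (key STRING, value STRING) PARTITIONED BY (hr INT);
  -- Assumed setting: allows simple scans/filters to run as a fetch task.
  SET hive.fetch.task.conversion=more;
  -- Per the updated golden files, this now plans as a single Stage-0 fetch.
  EXPLAIN SELECT key, value, hr FROM partint WHERE hr < 11;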
diff --git a/ql/src/test/results/clientpositive/groupby4_map.q.out b/ql/src/test/results/clientpositive/groupby4_map.q.out
index bbd63ad..89a0778 100644
--- a/ql/src/test/results/clientpositive/groupby4_map.q.out
+++ b/ql/src/test/results/clientpositive/groupby4_map.q.out
@@ -44,10 +44,10 @@ STAGE PLANS:
         Select Operator
           expressions: UDFToInteger(_col0) (type: int)
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/groupby4_map_skew.q.out b/ql/src/test/results/clientpositive/groupby4_map_skew.q.out
index 0c15ea6..fc9e7d1 100644
--- a/ql/src/test/results/clientpositive/groupby4_map_skew.q.out
+++ b/ql/src/test/results/clientpositive/groupby4_map_skew.q.out
@@ -44,10 +44,10 @@ STAGE PLANS:
         Select Operator
           expressions: UDFToInteger(_col0) (type: int)
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/groupby_cube1.q.out b/ql/src/test/results/clientpositive/groupby_cube1.q.out
index 0f840d6..7b5d70a 100644
--- a/ql/src/test/results/clientpositive/groupby_cube1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_cube1.q.out
@@ -44,12 +44,12 @@ STAGE PLANS:
               keys: key (type: string), val (type: string), '0' (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-                Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -128,12 +128,12 @@ STAGE PLANS:
               keys: key (type: string), '0' (type: string), val (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -200,12 +200,12 @@ STAGE PLANS:
               keys: key (type: string), val (type: string), '0' (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: rand() (type: double)
-                Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -213,7 +213,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -229,7 +229,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-            Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -308,12 +308,12 @@ STAGE PLANS:
               keys: key (type: string), '0' (type: string), val (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -405,12 +405,12 @@ STAGE PLANS:
               keys: key (type: string), val (type: string), '0' (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: rand() (type: double)
-                Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col3 (type: bigint)
           Select Operator
             expressions: key (type: string), val (type: string)
@@ -421,7 +421,7 @@ STAGE PLANS:
               keys: key (type: string), val (type: string), '0' (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               File Output Operator
                 compressed: false
                 table:
@@ -434,7 +434,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -450,7 +450,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-            Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -493,7 +493,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: rand() (type: double)
-            Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -501,7 +501,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -517,7 +517,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-            Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
index 75aaddc..7c88a67 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out
@@ -43,12 +43,12 @@ STAGE PLANS:
               keys: a (type: string), b (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -56,7 +56,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), '0' (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -72,7 +72,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-            Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -149,12 +149,12 @@ STAGE PLANS:
               keys: a (type: string), b (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col2 (type: double)
       Reduce Operator Tree:
         Group By Operator
@@ -162,7 +162,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), '0' (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -178,7 +178,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-            Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: double)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
index a1842c1..6ae6e55 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out
@@ -62,12 +62,12 @@ STAGE PLANS:
               keys: a (type: string), b (type: string), '0' (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-                Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col3 (type: struct), _col4 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -149,12 +149,12 @@ STAGE PLANS:
               keys: a (type: string), b (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col2 (type: struct), _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -162,7 +162,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), '0' (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -178,7 +178,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-            Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: struct), _col4 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
index ce229d3..cf1515c 100644
--- a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
+++ b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out
@@ -49,12 +49,12 @@ STAGE PLANS:
               keys: a (type: string), b (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -172,12 +172,12 @@ STAGE PLANS:
               keys: a (type: string), b (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/groupby_rollup1.q.out
index 827b081..bc77aea 100644
--- a/ql/src/test/results/clientpositive/groupby_rollup1.q.out
+++ b/ql/src/test/results/clientpositive/groupby_rollup1.q.out
@@ -44,12 +44,12 @@ STAGE PLANS:
               keys: key (type: string), val (type: string), '0' (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-                Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -122,12 +122,12 @@ STAGE PLANS:
               keys: key (type: string), '0' (type: string), val (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -194,12 +194,12 @@ STAGE PLANS:
               keys: key (type: string), val (type: string), '0' (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: rand() (type: double)
-                Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -207,7 +207,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -223,7 +223,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-            Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -296,12 +296,12 @@ STAGE PLANS:
               keys: key (type: string), '0' (type: string), val (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col2:0._col0)
@@ -393,12 +393,12 @@ STAGE PLANS:
               keys: key (type: string), val (type: string), '0' (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                 sort order: +++
                 Map-reduce partition columns: rand() (type: double)
-                Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col3 (type: bigint)
           Select Operator
             expressions: key (type: string), val (type: string)
@@ -409,7 +409,7 @@ STAGE PLANS:
               keys: key (type: string), val (type: string), '0' (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               File Output Operator
                 compressed: false
                 table:
@@ -422,7 +422,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -438,7 +438,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-            Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -481,7 +481,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: rand() (type: double)
-            Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -489,7 +489,7 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
           mode: partials
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -505,7 +505,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
             sort order: +++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-            Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             value expressions: _col3 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/groupby_sort_11.q.out b/ql/src/test/results/clientpositive/groupby_sort_11.q.out
index 311815a..a175852 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_11.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_11.q.out
@@ -388,32 +388,32 @@ STAGE PLANS:
       Map Operator Tree:
          TableScan
            alias: t1
-            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
            Select Operator
-              Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count(DISTINCT 1)
                keys: 1 (type: int)
                mode: hash
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: int)
                  sort order: +
-                  Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(DISTINCT KEY._col0:0._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
              table:
                input format: org.apache.hadoop.mapred.TextInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/groupby_sort_6.q.out b/ql/src/test/results/clientpositive/groupby_sort_6.q.out
index cd1f979..67dee46 100644
--- a/ql/src/test/results/clientpositive/groupby_sort_6.q.out
+++ b/ql/src/test/results/clientpositive/groupby_sort_6.q.out
@@ -367,12 +367,12 @@ STAGE PLANS:
               keys: key (type: string)
               mode: hash
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 tag: -1
                 value expressions: _col1 (type: bigint)
                 auto parallelism: false
diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
index 1a6dae8..8f83ada 100644
--- a/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
+++ b/ql/src/test/results/clientpositive/infer_bucket_sort_dyn_part.q.out
@@ -310,9 +310,9 @@ SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat
 OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
 Compressed: No
-Num Buckets: -1
-Bucket Columns: []
-Sort Columns: []
+Num Buckets: 1
+Bucket Columns: [key]
+Sort Columns: [Order(col:key, order:1)]
 Storage Desc Params:
 	serialization.format	1
 PREHOOK: query: DESCRIBE FORMATTED test_table PARTITION (ds='2008-04-08', hr='12')
@@ -352,9 +352,9 @@ SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
 InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat
 OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
 Compressed: No
-Num Buckets: -1
-Bucket Columns: []
-Sort Columns: []
+Num Buckets: 1
+Bucket Columns: [key]
+Sort Columns: [Order(col:key, order:1)]
 Storage Desc Params:
 	serialization.format	1
 PREHOOK: query: CREATE TABLE srcpart_merge_dp LIKE srcpart
@@ -468,14 +468,13 @@ GROUP BY key) a
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6
-  Stage-5
-  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
-  Stage-3 depends on stages: Stage-0
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
   Stage-4
-  Stage-6
-  Stage-7 depends on stages: Stage-6
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -513,37 +512,17 @@ STAGE PLANS:
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            Reduce Output Operator
-              key expressions: _col2 (type: string), _col0 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col2 (type: string)
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                name: default.test_table
+              table:
+                  input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                  name: default.test_table
 
-  Stage: Stage-8
+  Stage: Stage-7
     Conditional Operator
 
-  Stage: Stage-5
+  Stage: Stage-4
     Move Operator
       files:
           hdfs directory: true
@@ -562,24 +541,24 @@ STAGE PLANS:
               serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
               name: default.test_table
 
-  Stage: Stage-3
+  Stage: Stage-2
     Stats-Aggr Operator
 
-  Stage: Stage-4
+  Stage: Stage-3
     Merge File Operator
       Map Operator Tree:
          RCFile Merge Operator
       merge level: block
       input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
 
-  Stage: Stage-6
+  Stage: Stage-5
     Merge File Operator
      Map Operator Tree:
          RCFile Merge Operator
      merge level: block
      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
 
-  Stage: Stage-7
+  Stage: Stage-6
     Move Operator
       files:
           hdfs directory: true
@@ -639,7 +618,7 @@ Partition Parameters:
 	numFiles	1
 	numRows	0
 	rawDataSize	0
-	totalSize	94
+	totalSize	115
#### A masked pattern was here ####
 
 # Storage Information
@@ -678,10 +657,10 @@ Protect Mode: None
#### A masked pattern was here ####
 Partition Parameters:
 	COLUMN_STATS_ACCURATE	true
-	numFiles	1
+	numFiles	2
 	numRows	0
 	rawDataSize	0
-	totalSize	1341
+	totalSize	1427
#### A masked pattern was here ####
 
 # Storage Information
diff --git a/ql/src/test/results/clientpositive/input24.q.out b/ql/src/test/results/clientpositive/input24.q.out
index 4e20be6..75689b3 100644
--- a/ql/src/test/results/clientpositive/input24.q.out
+++ b/ql/src/test/results/clientpositive/input24.q.out
@@ -29,31 +29,31 @@ STAGE PLANS:
       Map Operator Tree:
          TableScan
            alias: x
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Select Operator
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Group By Operator
                aggregations: count(1)
                mode: hash
                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  sort order:
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                  value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
              table:
                input format: org.apache.hadoop.mapred.TextInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/input30.q.out b/ql/src/test/results/clientpositive/input30.q.out
index e5b6737..ae89ccb 100644
--- a/ql/src/test/results/clientpositive/input30.q.out
+++ b/ql/src/test/results/clientpositive/input30.q.out
@@ -57,10 +57,10 @@ STAGE PLANS:
         Select Operator
           expressions: UDFToInteger(_col0) (type: int)
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/input32.q.out b/ql/src/test/results/clientpositive/input32.q.out
index 1cfc189..eba9518 100644
--- a/ql/src/test/results/clientpositive/input32.q.out
+++ b/ql/src/test/results/clientpositive/input32.q.out
@@ -54,10 +54,10 @@ STAGE PLANS:
         Select Operator
           expressions: UDFToInteger(_col0) (type: int)
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/input4.q.out b/ql/src/test/results/clientpositive/input4.q.out
index 8281d7c..4b81761 100644
--- a/ql/src/test/results/clientpositive/input4.q.out
+++ b/ql/src/test/results/clientpositive/input4.q.out
@@ -44,7 +44,7 @@ PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN FORMATTED SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
 POSTHOOK: type: QUERY
-{"STAGE PLANS":{"Stage-1":{"Map Reduce":{"Map Operator Tree:":[{"TableScan":{"alias:":"input4alias","children":{"Select Operator":{"expressions:":"value (type: string), key (type: string)","outputColumnNames:":["_col0","_col1"],"children":{"File Output Operator":{"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE","compressed:":"false","table:":{"serde:":"org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe","input format:":"org.apache.hadoop.mapred.TextInputFormat","output format:":"org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"}}},"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE"}}]}},"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"ListSink":{}}}}},"STAGE DEPENDENCIES":{"Stage-1":{"ROOT STAGE":"TRUE"},"Stage-0":{"DEPENDENT STAGES":"Stage-1"}}}
+{"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"limit:":"-1","Processor Tree:":{"TableScan":{"alias:":"input4alias","children":{"Select Operator":{"expressions:":"value (type: string), key (type: string)","outputColumnNames:":["_col0","_col1"],"children":{"ListSink":{}},"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE"}},"Statistics:":"Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE"}}}}},"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}}}
 PREHOOK: query: SELECT Input4Alias.VALUE, Input4Alias.KEY FROM INPUT4 AS Input4Alias
 PREHOOK: type: QUERY
 PREHOOK: Input: default@input4
diff --git a/ql/src/test/results/clientpositive/insert_acid_dynamic_partition.q.out b/ql/src/test/results/clientpositive/insert_acid_dynamic_partition.q.out
new file mode 100644
index 0000000..07eedf3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/insert_acid_dynamic_partition.q.out
@@ -0,0 +1,48 @@
+PREHOOK: query: create table acid_dynamic(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dynamic
+POSTHOOK: query: create table acid_dynamic(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dynamic
+PREHOOK: query: insert into table acid_dynamic partition (ds) select cint, cast(cstring1 as varchar(128)), cstring2 from alltypesorc where cint is not null and cint < 0 order by cint limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dynamic
+POSTHOOK: query: insert into table acid_dynamic partition (ds) select cint, cast(cstring1 as varchar(128)), cstring2 from alltypesorc where cint is not null and cint < 0 order by cint limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dynamic@ds=4KWs6gw7lv2WYd66P
+POSTHOOK: Output: default@acid_dynamic@ds=4hA4KQj2vD3fI6gX82220d
+POSTHOOK: Output: default@acid_dynamic@ds=KbaDXiN85adbHRx58v
+POSTHOOK: Output: default@acid_dynamic@ds=P76636jJ6qM17d7DIy
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4KWs6gw7lv2WYd66P).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4KWs6gw7lv2WYd66P).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4hA4KQj2vD3fI6gX82220d).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4hA4KQj2vD3fI6gX82220d).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=KbaDXiN85adbHRx58v).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=KbaDXiN85adbHRx58v).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=P76636jJ6qM17d7DIy).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=P76636jJ6qM17d7DIy).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_dynamic order by a,b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dynamic
+PREHOOK: Input: default@acid_dynamic@ds=4KWs6gw7lv2WYd66P
+PREHOOK: Input: default@acid_dynamic@ds=4hA4KQj2vD3fI6gX82220d
+PREHOOK: Input: default@acid_dynamic@ds=KbaDXiN85adbHRx58v
+PREHOOK: Input: default@acid_dynamic@ds=P76636jJ6qM17d7DIy
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_dynamic order by a,b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dynamic
+POSTHOOK: Input: default@acid_dynamic@ds=4KWs6gw7lv2WYd66P
+POSTHOOK: Input: default@acid_dynamic@ds=4hA4KQj2vD3fI6gX82220d
+POSTHOOK: Input: default@acid_dynamic@ds=KbaDXiN85adbHRx58v
+POSTHOOK: Input: default@acid_dynamic@ds=P76636jJ6qM17d7DIy
+#### A masked pattern was here ####
+-1073279343	oj1YrV5Wa	P76636jJ6qM17d7DIy
+-1073051226	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d
+-1072910839	0iqrc5	KbaDXiN85adbHRx58v
+-1072081801	dPkN74F7	4KWs6gw7lv2WYd66P
+-1072076362	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P
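Note on the ACID-related golden files in this patch (the new file above, plus insert_orig_table.q.out and insert_update_delete.q.out below): the test DDL now carries TBLPROPERTIES ('transactional'='true') on bucketed ORC tables. A minimal sketch of the DDL pattern being exercised, with a hypothetical table name not taken from this patch:

  -- Hypothetical example mirroring the updated .q files: a bucketed ORC
  -- table flagged as transactional so it can accept ACID inserts.
  CREATE TABLE acid_demo (a INT, b VARCHAR(128))
  CLUSTERED BY (a) INTO 2 BUCKETS
  STORED AS ORC
  TBLPROPERTIES ('transactional'='true');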
diff --git a/ql/src/test/results/clientpositive/insert_acid_not_bucketed.q.out b/ql/src/test/results/clientpositive/insert_acid_not_bucketed.q.out
new file mode 100644
index 0000000..985ae40
--- /dev/null
+++ b/ql/src/test/results/clientpositive/insert_acid_not_bucketed.q.out
@@ -0,0 +1,36 @@
+PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_notbucketed
+PREHOOK: query: insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_notbucketed
+POSTHOOK: Lineage: acid_notbucketed.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_notbucketed.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_notbucketed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_notbucketed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+-1073279343	oj1YrV5Wa
+-1073051226	A34p7oRr2WvUJNf
+-1072910839	0iqrc5
+-1072081801	dPkN74F7
+-1072076362	2uLyD28144vklju213J1mr
+-1071480828	aw724t8c5558x2xneC624
+-1071363017	Anj0oF
+-1070883071	0ruyd6Y50JpdGRf6HqD
+-1070551679	iUR3Q
+-1069736047	k17Am8uPHWk02cEf1jet
diff --git a/ql/src/test/results/clientpositive/insert_into1.q.out b/ql/src/test/results/clientpositive/insert_into1.q.out
index 9b2517c..9e5f3bb 100644
--- a/ql/src/test/results/clientpositive/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/insert_into1.q.out
@@ -94,6 +94,31 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
#### A masked pattern was here ####
 10226524244
+PREHOOK: query: explain
+select count(*) from insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+100
 PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100
@@ -178,11 +203,27 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
#### A masked pattern was here ####
 20453048488
-PREHOOK: query: SELECT COUNT(*) FROM insert_into1
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@insert_into1
#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM insert_into1
+POSTHOOK: query: select count(*) from insert_into1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
#### A masked pattern was here ####
@@ -271,6 +312,31 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
#### A masked pattern was here ####
 -826625916
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+10
 PREHOOK: query: DROP TABLE insert_into1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into1
diff --git a/ql/src/test/results/clientpositive/insert_into2.q.out b/ql/src/test/results/clientpositive/insert_into2.q.out
index 4fee0c6..acbedb5 100644
--- a/ql/src/test/results/clientpositive/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/insert_into2.q.out
@@ -87,6 +87,31 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@insert_into2@ds=1
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select count (*) from insert_into2 where ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count (*) from insert_into2 where ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count (*) from insert_into2 where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: select count (*) from insert_into2 where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+100
 PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
@@ -97,15 +122,29 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@insert_into2@ds=1
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@insert_into2
-PREHOOK: Input: default@insert_into2@ds=1
#### A masked pattern was here ####
 POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into2
-POSTHOOK: Input: default@insert_into2@ds=1
#### A masked pattern was here ####
 200
 PREHOOK: query: SELECT SUM(HASH(c)) FROM (
@@ -217,6 +256,31 @@ POSTHOOK: Input: default@insert_into2@ds=1
 POSTHOOK: Input: default@insert_into2@ds=2
#### A masked pattern was here ####
 -36239931656
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+100
 PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
   SELECT * FROM src LIMIT 50
 PREHOOK: type: QUERY
@@ -311,6 +375,31 @@ POSTHOOK: Input: default@insert_into2@ds=1
 POSTHOOK: Input: default@insert_into2@ds=2
#### A masked pattern was here ####
 -27100860056
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+50
 PREHOOK: query: DROP TABLE insert_into2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into2
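Note on the new explain blocks added to insert_into1.q.out and insert_into2.q.out above: each COUNT(*) now plans as a lone Stage-0 Fetch Operator with limit: 1 and no job stages, and the query still returns the correct row count after each insert. This is consistent with the count being answered from metastore statistics; the setting named below is an assumption about the mechanism and does not appear in this diff:

  -- Hypothetical sketch: with stats-based answering enabled, a plain
  -- COUNT(*) can be served from table statistics instead of a job.
  SET hive.compute.query.using.stats=true;  -- assumed configuration
  EXPLAIN SELECT COUNT(*) FROM insert_into1;  -- single Fetch stage, limit: 1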
             Select Operator
               expressions: key (type: int), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              Statistics: Num rows: 20 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 10 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+                Statistics: Num rows: 20 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/insert_into6.q.out b/ql/src/test/results/clientpositive/insert_into6.q.out
index 53f8bac..388b200 100644
--- a/ql/src/test/results/clientpositive/insert_into6.q.out
+++ b/ql/src/test/results/clientpositive/insert_into6.q.out
@@ -132,8 +132,13 @@ POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into6b PARTITION (ds)
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -146,23 +151,23 @@ STAGE PLANS:
             expressions: key (type: int), value (type: string), ds (type: string)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 250 Data size: 2680 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: _col2 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col2 (type: string)
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 250 Data size: 2680 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 250 Data size: 2680 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 250 Data size: 2680 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.insert_into6b
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.insert_into6b
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -179,6 +184,36 @@ STAGE PLANS:
   Stage: Stage-2
     Stats-Aggr Operator
 
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.insert_into6b
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.insert_into6b
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
 PREHOOK: query: INSERT INTO TABLE insert_into6b PARTITION (ds) SELECT * FROM insert_into6a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@insert_into6a
diff --git a/ql/src/test/results/clientpositive/insert_orig_table.q.out b/ql/src/test/results/clientpositive/insert_orig_table.q.out
index 97a284b..5eea74d 100644
--- a/ql/src/test/results/clientpositive/insert_orig_table.q.out
+++ b/ql/src/test/results/clientpositive/insert_orig_table.q.out
@@ -10,7 +10,7 @@ PREHOOK: query: create table acid_iot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_iot
@@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_iot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_iot
diff --git a/ql/src/test/results/clientpositive/insert_update_delete.q.out b/ql/src/test/results/clientpositive/insert_update_delete.q.out
index e9f9984..9a3cf4b 100644
--- a/ql/src/test/results/clientpositive/insert_update_delete.q.out
+++ b/ql/src/test/results/clientpositive/insert_update_delete.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_iud
-POSTHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_iud
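Note: the hunks above add TBLPROPERTIES ('transactional'='true') to the CREATE TABLE statements of the existing ACID tests, and the new golden file below covers INSERT ... VALUES into an ORC table that has no bucketing clause. A minimal sketch of the two table shapes involved (table names here are illustrative, not taken from the tests):

    -- ACID test tables now declare themselves transactional explicitly
    create table acid_example (a int, b varchar(128))
      clustered by (a) into 2 buckets stored as orc
      TBLPROPERTIES ('transactional'='true');

    -- the new test exercises a plain, non-bucketed ORC table
    create table plain_orc_example (a int, b varchar(128)) stored as orc;
    insert into table plain_orc_example values (1, 'abc'), (2, 'def');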
diff --git a/ql/src/test/results/clientpositive/insert_values_acid_not_bucketed.q.out b/ql/src/test/results/clientpositive/insert_values_acid_not_bucketed.q.out
new file mode 100644
index 0000000..4f8ddfa
--- /dev/null
+++ b/ql/src/test/results/clientpositive/insert_values_acid_not_bucketed.q.out
@@ -0,0 +1,28 @@
+PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_notbucketed
+PREHOOK: query: insert into table acid_notbucketed values (1, 'abc'), (2, 'def')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: insert into table acid_notbucketed values (1, 'abc'), (2, 'def')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@acid_notbucketed
+POSTHOOK: Lineage: acid_notbucketed.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: acid_notbucketed.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: select * from acid_notbucketed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_notbucketed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+1	abc
+2	def
diff --git a/ql/src/test/results/clientpositive/insert_values_dynamic_partitioned.q.out b/ql/src/test/results/clientpositive/insert_values_dynamic_partitioned.q.out
index daea059..773feb4 100644
--- a/ql/src/test/results/clientpositive/insert_values_dynamic_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/insert_values_dynamic_partitioned.q.out
@@ -1,12 +1,12 @@
 PREHOOK: query: create table ivdp(i int,
     de decimal(5,2),
-    vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+    vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@ivdp
 POSTHOOK: query: create table ivdp(i int,
     de decimal(5,2),
-    vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+    vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ivdp
diff --git a/ql/src/test/results/clientpositive/insert_values_non_partitioned.q.out b/ql/src/test/results/clientpositive/insert_values_non_partitioned.q.out
index ff041b8..5b1c3cc 100644
--- a/ql/src/test/results/clientpositive/insert_values_non_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/insert_values_non_partitioned.q.out
@@ -10,7 +10,7 @@ PREHOOK: query: create table acid_ivnp(ti tinyint,
     b boolean,
     s string,
     vc varchar(128),
-    ch char(12)) clustered by (i) into 2 buckets stored as orc
+    ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivnp
@@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_ivnp(ti tinyint,
     b boolean,
     s string,
     vc varchar(128),
-    ch char(12)) clustered by (i) into 2 buckets stored as orc
+    ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivnp
diff --git a/ql/src/test/results/clientpositive/insert_values_orig_table.q.out b/ql/src/test/results/clientpositive/insert_values_orig_table.q.out
index 69220ec..684cd1b 100644
--- a/ql/src/test/results/clientpositive/insert_values_orig_table.q.out
+++ b/ql/src/test/results/clientpositive/insert_values_orig_table.q.out
@@ -10,7 +10,7 @@ PREHOOK: query: create table acid_ivot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivot
@@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_ivot(
     ctimestamp1 TIMESTAMP,
     ctimestamp2 TIMESTAMP,
     cboolean1 BOOLEAN,
-    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+    cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivot
diff --git a/ql/src/test/results/clientpositive/insert_values_partitioned.q.out b/ql/src/test/results/clientpositive/insert_values_partitioned.q.out
index 9fb89ff..6681992 100644
--- a/ql/src/test/results/clientpositive/insert_values_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/insert_values_partitioned.q.out
@@ -9,7 +9,7 @@ PREHOOK: query: create table acid_ivp(ti tinyint,
     dt date,
     s string,
     vc varchar(128),
-    ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+    ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivp
@@ -24,7 +24,7 @@ POSTHOOK: query: create table acid_ivp(ti tinyint,
     dt date,
     s string,
     vc varchar(128),
-    ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+    ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivp
diff --git a/ql/src/test/results/clientpositive/insert_values_tmp_table.q.out b/ql/src/test/results/clientpositive/insert_values_tmp_table.q.out
index 95d6372..170b4a7 100644
--- a/ql/src/test/results/clientpositive/insert_values_tmp_table.q.out
+++ b/ql/src/test/results/clientpositive/insert_values_tmp_table.q.out
@@ -1,20 +1,22 @@
-PREHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc
+PREHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivtt
-POSTHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc
+POSTHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivtt
 PREHOOK: query: insert into table acid_ivtt values (1, 109.23, 'mary had a little lamb'),
-  (429496729, 0.14, 'its fleece was white as snow')
+  (429496729, 0.14, 'its fleece was white as snow'),
+  (-29496729, -0.14, 'negative values test')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__1
 PREHOOK: Output: default@acid_ivtt
 POSTHOOK: query: insert into table acid_ivtt values (1, 109.23, 'mary had a little lamb'),
-  (429496729, 0.14, 'its fleece was white as snow')
+  (429496729, 0.14, 'its fleece was white as snow'),
+  (-29496729, -0.14, 'negative values test')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@acid_ivtt
@@ -29,5 +31,6 @@ POSTHOOK: query: select i, de, vc from acid_ivtt order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_ivtt
 #### A masked pattern was here ####
+-29496729	-0.14	negative values test
 1	109.23	mary had a little lamb
 429496729	0.14	its fleece was white as snow
diff --git a/ql/src/test/results/clientpositive/join8.q.out b/ql/src/test/results/clientpositive/join8.q.out
index 6435b92..9e13a5e 100644
--- a/ql/src/test/results/clientpositive/join8.q.out
+++ b/ql/src/test/results/clientpositive/join8.q.out
@@ -90,7 +90,7 @@ STAGE PLANS:
             predicate: _col2 is null (type: boolean)
             Statistics: Num rows: 15 Data size: 163 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int), _col3 (type: string)
+              expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(null) (type: int), _col3 (type: string)
               outputColumnNames: _col0, _col1, _col2, _col3
               Statistics: Num rows: 15 Data size: 163 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
diff --git a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
new file mode 100644
index 0000000..b3f1e09
--- /dev/null
+++ b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out
@@ -0,0 +1,121 @@
+PREHOOK: query: explain
+select count(*) from srcpart a join srcpart b on a.key = b.key and a.hr = b.hr join srcpart c on a.hr = c.hr and a.key = c.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from srcpart a join srcpart b on a.key = b.key and a.hr = b.hr join srcpart c on a.hr = c.hr and a.key = c.key
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: b
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: key (type: string), hr (type: string)
+                sort order: ++
+                Map-reduce partition columns: key (type: string), hr (type: string)
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: c
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: key (type: string), hr (type: string)
+                sort order: ++
+                Map-reduce partition columns: key (type: string), hr (type: string)
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+          TableScan
+            alias: a
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            Filter Operator
+              predicate: key is not null (type: boolean)
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+              Reduce Output Operator
+                key expressions: key (type: string), hr (type: string)
+                sort order: ++
+                Map-reduce partition columns: key (type: string), hr (type: string)
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+      Reduce Operator Tree:
+        Join Operator
+          condition map:
+               Inner Join 0 to 1
+               Inner Join 0 to 2
+          condition expressions:
+            0
+            1
+            2
+          Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
+            Group By Operator
+              aggregations: count()
+              mode: hash
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order:
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              value expressions: _col0 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: bigint)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from srcpart a join srcpart b on a.key = b.key and a.hr = b.hr join srcpart c on a.hr = c.hr and a.key = c.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from srcpart a join srcpart b on a.key = b.key and a.hr = b.hr join srcpart c on a.hr = c.hr and a.key = c.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+42464
diff --git a/ql/src/test/results/clientpositive/join_view.q.out b/ql/src/test/results/clientpositive/join_view.q.out
index 3e54177..ef97996 100644
--- a/ql/src/test/results/clientpositive/join_view.q.out
+++ b/ql/src/test/results/clientpositive/join_view.q.out
@@ -59,7 +59,7 @@ STAGE PLANS:
               outputColumnNames: _col1, _col6
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Select Operator
-                expressions: _col1 (type: string), _col6 (type: int), _col8 (type: string)
+                expressions: _col1 (type: string), _col6 (type: int), '2011-09-01' (type: string)
                 outputColumnNames: _col0, _col1, _col2
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 File Output Operator
diff --git a/ql/src/test/results/clientpositive/keyword_1.q.out b/ql/src/test/results/clientpositive/keyword_1.q.out
index 22bf5e3..135d8e5 100644
--- a/ql/src/test/results/clientpositive/keyword_1.q.out
+++ b/ql/src/test/results/clientpositive/keyword_1.q.out
@@ -21,33 +21,21 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select user from test_user
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: test_user
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            Select Operator
-              expressions: user (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: test_user
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+          Select Operator
+            expressions: user (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            ListSink
 
 PREHOOK: query: show grant user hive_test on table test_user
 PREHOOK: type: SHOW_GRANT
@@ -81,33 +69,21 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select role from test_user
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: test_user
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            Select Operator
-              expressions: role (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
      limit: -1
      Processor Tree:
-        ListSink
+        TableScan
+          alias: test_user
+          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+          Select Operator
+            expressions: role (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+            ListSink
 
 PREHOOK: query: show grant user hive_test on table test_user
 PREHOOK: type: SHOW_GRANT
diff --git a/ql/src/test/results/clientpositive/lateral_view_ppd.q.out b/ql/src/test/results/clientpositive/lateral_view_ppd.q.out
index 756679a..b186192 100644
--- a/ql/src/test/results/clientpositive/lateral_view_ppd.q.out
+++ b/ql/src/test/results/clientpositive/lateral_view_ppd.q.out
@@ -109,7 +109,7 @@ STAGE PLANS:
                   outputColumnNames: _col1, _col5
                   Statistics: Num rows: 375 Data size: 3984 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col1 (type: string), _col5 (type: int)
+                    expressions: _col1 (type: string), 1 (type: int)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 375 Data size: 3984 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
@@ -133,7 +133,7 @@ STAGE PLANS:
                   outputColumnNames: _col1, _col5
                   Statistics: Num rows: 375 Data size: 3984 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
-                    expressions: _col1 (type: string), _col5 (type: int)
+                    expressions: _col1 (type: string), 1 (type: int)
                     outputColumnNames: _col0, _col1
                     Statistics: Num rows: 375 Data size: 3984 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
diff --git a/ql/src/test/results/clientpositive/limit0.q.out b/ql/src/test/results/clientpositive/limit0.q.out
index d047374..3f08bdd 100644
--- a/ql/src/test/results/clientpositive/limit0.q.out
+++ b/ql/src/test/results/clientpositive/limit0.q.out
@@ -42,19 +42,25 @@ STAGE PLANS:
 PREHOOK: query: select * from src where key = '238' limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 POSTHOOK: query: select * from src where key = '238' limit 0
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 PREHOOK: query: select src.key, count(src.value) from src group by src.key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 POSTHOOK: query: select src.key, count(src.value) from src group by src.key limit 0
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 PREHOOK: query: select * from ( select key from src limit 3) sq1 limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 POSTHOOK: query: select * from ( select key from src limit 3) sq1 limit 0
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
 #### A masked pattern was here ####
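Note: in keyword_1.q.out above and limit_partition_metadataonly.q.out below, plans for simple SELECTs collapse from a MapReduce stage plus a fetch stage into a single Fetch Operator stage with the TableScan inlined under its Processor Tree. That is the plan shape Hive produces when such queries are served by a fetch task instead of a job. Assuming the standard srcpart test table, a plan of this shape can be reproduced as sketched below; the setting is a real Hive option, but its involvement here is an inference from the plans, not something the patch states:

    set hive.fetch.task.conversion=more;
    explain select ds from srcpart where hr=11 and ds='2008-04-08';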
diff --git a/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out b/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
index 781cac3..354d440 100644
--- a/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
+++ b/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out
@@ -3,33 +3,21 @@ PREHOOK: type: QUERY
 POSTHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08'
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              expressions: ds (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
      limit: -1
      Processor Tree:
-        ListSink
+        TableScan
+          alias: srcpart
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: ds (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: select ds from srcpart where hr=11 and ds='2008-04-08'
 PREHOOK: type: QUERY
@@ -555,34 +543,34 @@ STAGE PLANS:
       Map Operator Tree:
          TableScan
            alias: srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: hr (type: string)
              outputColumnNames: hr
-              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                keys: hr (type: string)
                mode: hash
                outputColumnNames: _col0
-                Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string)
                  sort order: +
                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
      Reduce Operator Tree:
        Group By Operator
          keys: KEY._col0 (type: string)
          mode: mergepartial
          outputColumnNames: _col0
-          Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: string)
            outputColumnNames: _col0
-            Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-              Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/limit_pushdown.q.out b/ql/src/test/results/clientpositive/limit_pushdown.q.out
index a5a0090..4abef8c 100644
--- a/ql/src/test/results/clientpositive/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/limit_pushdown.q.out
@@ -631,9 +631,11 @@ STAGE PLANS:
 PREHOOK: query: select key,value from src order by key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 POSTHOOK: query: select key,value from src order by key limit 0
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 PREHOOK: query: -- 2MR (applied to last RS)
 explain
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
index b8fe430..0e47417 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
@@ -424,18 +424,18 @@ STAGE PLANS:
       Map Operator Tree:
          TableScan
            alias: fact_daily
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Select Operator
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count()
                mode: hash
                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  sort order:
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                  tag: -1
                  value expressions: _col0 (type: bigint)
                  auto parallelism: false
@@ -496,17 +496,17 @@ STAGE PLANS:
          aggregations: count(VALUE._col0)
          mode: mergepartial
          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: bigint)
            outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              GlobalTableId: 0
 #### A masked pattern was here ####
              NumFilesPerFileSink: 1
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
index 221ee70..85fc092 100644
--- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
+++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
@@ -409,7 +409,7 @@ STAGE PLANS:
            predicate: (x = 484) (type: boolean)
            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Select Operator
-              expressions: x (type: int), y (type: string)
+              expressions: 484 (type: int), y (type: string)
              outputColumnNames: _col0, _col1
              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              File Output Operator
diff --git a/ql/src/test/results/clientpositive/load_dyn_part1.q.out b/ql/src/test/results/clientpositive/load_dyn_part1.q.out
index 4c9ee05..e7dace5 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part1.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part1.q.out
@@ -58,11 +58,20 @@ insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, v
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-2 is a root stage
-  Stage-0 depends on stages: Stage-2
+  Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6
+  Stage-5
+  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
+  Stage-4
+  Stage-6
+  Stage-7 depends on stages: Stage-6
+  Stage-14 depends on stages: Stage-2 , consists of Stage-11, Stage-10, Stage-12
+  Stage-11
+  Stage-1 depends on stages: Stage-11, Stage-10, Stage-13
+  Stage-9 depends on stages: Stage-1
+  Stage-10
+  Stage-12
+  Stage-13 depends on stages: Stage-12
 
 STAGE PLANS:
   Stage: Stage-2
@@ -78,12 +87,14 @@ STAGE PLANS:
             expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: _col2 (type: string), _col3 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part1
            Filter Operator
              predicate: (ds > '2008-04-08') (type: boolean)
              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
@@ -93,21 +104,21 @@
               Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
+                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
                 table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part1
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.nzhang_part2
+
+  Stage: Stage-8
+    Conditional Operator
+
+  Stage: Stage-5
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -129,23 +140,40 @@
     Map Reduce
       Map Operator Tree:
          TableScan
-            Reduce Output Operator
-              key expressions: _col2 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col2 (type: string)
-              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part2
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part1
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part1
+
+  Stage: Stage-7
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-14
+    Conditional Operator
+
+  Stage: Stage-11
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-1
     Move Operator
@@ -160,9 +188,39 @@ STAGE PLANS:
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           name: default.nzhang_part2
 
-  Stage: Stage-5
+  Stage: Stage-9
     Stats-Aggr Operator
 
+  Stage: Stage-10
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part2
+
+  Stage: Stage-12
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part2
+
+  Stage: Stage-13
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
 PREHOOK: query: from srcpart
 insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
 insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08'
diff --git a/ql/src/test/results/clientpositive/load_dyn_part10.q.out b/ql/src/test/results/clientpositive/load_dyn_part10.q.out
index d05e4f1..ca388b9 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part10.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part10.q.out
@@ -62,23 +62,14 @@ STAGE PLANS:
             expressions: key (type: string), value (type: string), hr (type: string)
             outputColumnNames: _col0, _col1, _col2
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: _col2 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col2 (type: string)
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part10
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part10
 
   Stage: Stage-0
     Move Operator
diff --git a/ql/src/test/results/clientpositive/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/load_dyn_part14.q.out
index 37ae8de..118d198 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part14.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part14.q.out
@@ -55,11 +55,16 @@ select key, value from (
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1, Stage-4, Stage-5
-  Stage-0 depends on stages: Stage-2
+  Stage-2 depends on stages: Stage-1, Stage-9, Stage-10
+  Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6
+  Stage-5
+  Stage-0 depends on stages: Stage-5, Stage-4, Stage-7
   Stage-3 depends on stages: Stage-0
-  Stage-4 is a root stage
-  Stage-5 is a root stage
+  Stage-4
+  Stage-6
+  Stage-7 depends on stages: Stage-6
+  Stage-9 is a root stage
+  Stage-10 is a root stage
 
 STAGE PLANS:
   Stage: Stage-1
@@ -104,12 +109,14 @@ STAGE PLANS:
             expressions: _col0 (type: string), _col1 (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-            Reduce Output Operator
-              key expressions: _col1 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col1 (type: string)
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col0 (type: string), _col1 (type: string)
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part14
          TableScan
            Union
              Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
@@ -117,12 +124,14 @@ STAGE PLANS:
             expressions: _col0 (type: string), _col1 (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-            Reduce Output Operator
-              key expressions: _col1 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col1 (type: string)
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col0 (type: string), _col1 (type: string)
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part14
          TableScan
            Union
              Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
@@ -130,23 +139,23 @@ STAGE PLANS:
             expressions: _col0 (type: string), _col1 (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-            Reduce Output Operator
-              key expressions: _col1 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col1 (type: string)
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-              value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part14
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part14
+
+  Stage: Stage-8
+    Conditional Operator
+
+  Stage: Stage-5
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
 
   Stage: Stage-0
     Move Operator
@@ -167,6 +176,36 @@ STAGE PLANS:
     Map Reduce
       Map Operator Tree:
          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part14
+
+  Stage: Stage-6
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part14
+
+  Stage: Stage-7
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-9
+    Map Reduce
+      Map Operator Tree:
+          TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
@@ -195,7 +234,7 @@ STAGE PLANS:
           output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
           serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
 
-  Stage: Stage-5
+  Stage: Stage-10
     Map Reduce
       Map Operator Tree:
          TableScan
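Note: throughout the load_dyn_part*.q.out files, dynamic-partition inserts no longer funnel rows through a Reduce Output Operator / Extract reducer keyed on the partition columns; the map-side File Output Operator now writes to the target table directly, and the added Conditional/Move/Map Reduce stages handle merging of the files produced per partition. This is the plan shape Hive generates when the sorted dynamic-partition optimization is not in effect. The re-planned statements are ordinary dynamic-partition inserts of roughly this shape (a sketch based on the plans above; the setting shown is the usual prerequisite for dynamic partitioning, not something introduced by this patch):

    set hive.exec.dynamic.partition.mode=nonstrict;
    insert overwrite table nzhang_part10 partition (ds='2008-12-31', hr)
    select key, value, hr from srcpart where ds > '2008-04-08';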
diff --git a/ql/src/test/results/clientpositive/load_dyn_part3.q.out b/ql/src/test/results/clientpositive/load_dyn_part3.q.out
index 77ba8aa..3242c3d 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part3.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part3.q.out
@@ -60,23 +60,14 @@ STAGE PLANS:
             expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: _col2 (type: string), _col3 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part3
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part3
 
   Stage: Stage-0
     Move Operator
diff --git a/ql/src/test/results/clientpositive/load_dyn_part4.q.out b/ql/src/test/results/clientpositive/load_dyn_part4.q.out
index 80955e3..d24875f 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part4.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part4.q.out
@@ -70,23 +70,14 @@ STAGE PLANS:
             expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: _col2 (type: string), _col3 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part4
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part4
 
   Stage: Stage-0
     Move Operator
diff --git a/ql/src/test/results/clientpositive/load_dyn_part5.q.out b/ql/src/test/results/clientpositive/load_dyn_part5.q.out
index 0bcc432..e4bc742 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part5.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part5.q.out
@@ -43,23 +43,14 @@ STAGE PLANS:
             expressions: key (type: string), value (type: string)
             outputColumnNames: _col0, _col1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: _col1 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col1 (type: string)
+            File Output Operator
+              compressed: false
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part5
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part5
 
   Stage: Stage-0
     Move Operator
diff --git a/ql/src/test/results/clientpositive/load_dyn_part8.q.out b/ql/src/test/results/clientpositive/load_dyn_part8.q.out
index a542fa8..dc55eec 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part8.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part8.q.out
@@ -115,9 +115,8 @@ STAGE DEPENDENCIES:
   Stage-2 is a root stage
   Stage-0 depends on stages: Stage-2
   Stage-3 depends on stages: Stage-0
-  Stage-4 depends on stages: Stage-2
-  Stage-1 depends on stages: Stage-4
-  Stage-5 depends on stages: Stage-1
+  Stage-1 depends on stages: Stage-2
+  Stage-4 depends on stages: Stage-1
 
 STAGE PLANS:
   Stage: Stage-2
@@ -135,14 +134,34 @@ STAGE PLANS:
             expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
-              key expressions: _col2 (type: string), _col3 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+            File Output Operator
+              compressed: false
+              GlobalTableId: 1
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
               Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-              tag: -1
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-              auto parallelism: false
+#### A masked pattern was here ####
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    columns key,value
+                    columns.comments defaultdefault
+                    columns.types string:string
+#### A masked pattern was here ####
+                    name default.nzhang_part8
+                    partition_columns ds/hr
+                    partition_columns.types string:string
+                    serialization.ddl struct nzhang_part8 { string key, string value}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.nzhang_part8
+              TotalFiles: 1
+              GatherStats: true
+              MultiFileSpray: false
            Filter Operator
              isSamplingPred: false
              predicate: (ds > '2008-04-08') (type: boolean)
@@ -153,20 +172,32 @@
              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-                GlobalTableId: 0
+                GlobalTableId: 2
 #### A masked pattern was here ####
                NumFilesPerFileSink: 1
+                Static Partition Specification: ds=2008-12-31/
+                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
                table:
-                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
-                      columns _col0,_col1,_col2
-                      columns.types string,string,string
-                      escape.delim \
-                      serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                    serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                      bucket_count -1
+                      columns key,value
+                      columns.comments defaultdefault
+                      columns.types string:string
+#### A masked pattern was here ####
+                      name default.nzhang_part8
+                      partition_columns ds/hr
+                      partition_columns.types string:string
+                      serialization.ddl struct nzhang_part8 { string key, string value}
+                      serialization.format 1
+                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    name: default.nzhang_part8
                TotalFiles: 1
-                GatherStats: false
+                GatherStats: true
                MultiFileSpray: false
      Path -> Alias:
 #### A masked pattern was here ####
@@ -360,38 +391,6 @@ STAGE PLANS:
        /srcpart/ds=2008-04-08/hr=12 [srcpart]
        /srcpart/ds=2008-04-09/hr=11 [srcpart]
        /srcpart/ds=2008-04-09/hr=12 [srcpart]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 1
-#### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                properties:
-                  bucket_count -1
-                  columns key,value
-                  columns.comments defaultdefault
-                  columns.types string:string
-#### A masked pattern was here ####
-                  name default.nzhang_part8
-                  partition_columns ds/hr
-                  partition_columns.types string:string
-                  serialization.ddl struct nzhang_part8 { string key, string value}
-                  serialization.format 1
-                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part8
-            TotalFiles: 1
-            GatherStats: true
-            MultiFileSpray: false
 
   Stage: Stage-0
     Move Operator
@@ -424,78 +423,6 @@ STAGE PLANS:
     Stats-Aggr Operator
 #### A masked pattern was here ####
 
-  Stage: Stage-4
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            GatherStats: false
-            Reduce Output Operator
-              key expressions: _col2 (type: string)
-              sort order: +
-              Map-reduce partition columns: _col2 (type: string)
-              Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-              tag: -1
-              value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-              auto parallelism: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: -mr-10002
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-            properties:
-              columns _col0,_col1,_col2
-              columns.types string,string,string
-              escape.delim \
-              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-            properties:
-              columns _col0,_col1,_col2
-              columns.types string,string,string
-              escape.delim \
-              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-      Truncated Path -> Alias:
-#### A masked pattern was here ####
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 2
-#### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            Static Partition Specification: ds=2008-12-31/
-            Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-            table:
-                input format: org.apache.hadoop.mapred.TextInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                properties:
-                  bucket_count -1
-                  columns key,value
-                  columns.comments defaultdefault
-                  columns.types string:string
-#### A masked pattern was here ####
-                  name default.nzhang_part8
-                  partition_columns ds/hr
-                  partition_columns.types string:string
-                  serialization.ddl struct nzhang_part8 { string key, string value}
-                  serialization.format 1
-                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                name: default.nzhang_part8
-            TotalFiles: 1
-            GatherStats: true
-            MultiFileSpray: false
-
   Stage: Stage-1
     Move Operator
      tables:
@@ -523,7 +450,7 @@
           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
           name: default.nzhang_part8
 
-  Stage: Stage-5
+  Stage: Stage-4
     Stats-Aggr Operator
 #### A masked pattern was here ####
 
diff --git a/ql/src/test/results/clientpositive/load_dyn_part9.q.out b/ql/src/test/results/clientpositive/load_dyn_part9.q.out
index d782880..300f41e 100644
--- a/ql/src/test/results/clientpositive/load_dyn_part9.q.out
+++ b/ql/src/test/results/clientpositive/load_dyn_part9.q.out
@@ -62,23 +62,14 @@ STAGE PLANS:
             expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
             outputColumnNames: _col0, _col1, _col2, _col3
             Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            Reduce Output Operator
Operator - key expressions: _col2 (type: string), _col3 (type: string) - sort order: ++ - Map-reduce partition columns: _col2 (type: string), _col3 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) - Reduce Operator Tree: - Extract - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part9 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part9 Stage: Stage-0 Move Operator diff --git a/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out b/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out index e77ad1e..536c92b 100644 --- a/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out @@ -275,7 +275,7 @@ STAGE PLANS: src2 TableScan alias: src2 - Statistics: Num rows: 1 Data size: 13 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 9 Data size: 40 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: 0 {value} @@ -1108,7 +1108,7 @@ STAGE PLANS: src2 TableScan alias: src2 - Statistics: Num rows: 1 Data size: 13 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 9 Data size: 40 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: 0 {value} diff --git a/ql/src/test/results/clientpositive/merge3.q.out b/ql/src/test/results/clientpositive/merge3.q.out index 1701be1..a0dc3a8 100644 --- a/ql/src/test/results/clientpositive/merge3.q.out +++ b/ql/src/test/results/clientpositive/merge3.q.out @@ -2447,14 +2447,34 @@ STAGE PLANS: expressions: key (type: string), value (type: string), ds (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) - auto parallelism: false +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.merge_src_part2 + partition_columns ds + partition_columns.types string + serialization.ddl struct merge_src_part2 { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### 
+ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.merge_src_part2 + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false Path -> Alias: #### A masked pattern was here #### Path -> Partition: @@ -2473,7 +2493,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.merge_src_part - numFiles 1 + numFiles 2 numRows 1000 partition_columns ds partition_columns.types string @@ -2518,7 +2538,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.merge_src_part - numFiles 1 + numFiles 2 numRows 1000 partition_columns ds partition_columns.types string @@ -2551,38 +2571,6 @@ STAGE PLANS: Truncated Path -> Alias: /merge_src_part/ds=2008-04-08 [merge_src_part] /merge_src_part/ds=2008-04-09 [merge_src_part] - Needs Tagging: false - Reduce Operator Tree: - Extract - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 1 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - bucket_count -1 - columns key,value - columns.comments - columns.types string:string -#### A masked pattern was here #### - name default.merge_src_part2 - partition_columns ds - partition_columns.types string - serialization.ddl struct merge_src_part2 { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.merge_src_part2 - TotalFiles: 1 - GatherStats: true - MultiFileSpray: false Stage: Stage-7 Conditional Operator @@ -4915,8 +4903,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + + sort order: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE tag: -1 @@ -4940,7 +4927,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.merge_src_part - numFiles 1 + numFiles 2 numRows 1000 partition_columns ds partition_columns.types string @@ -4985,7 +4972,7 @@ STAGE PLANS: columns.types string:string #### A masked pattern was here #### name default.merge_src_part - numFiles 1 + numFiles 2 numRows 1000 partition_columns ds partition_columns.types string diff --git a/ql/src/test/results/clientpositive/merge4.q.out b/ql/src/test/results/clientpositive/merge4.q.out index 515395f..f86c21c 100644 --- a/ql/src/test/results/clientpositive/merge4.q.out +++ b/ql/src/test/results/clientpositive/merge4.q.out @@ -37,23 +37,14 @@ STAGE PLANS: expressions: key (type: string), value (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - value 
expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Reduce Operator Tree: - Extract - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part Stage: Stage-7 Conditional Operator @@ -2830,12 +2821,14 @@ STAGE PLANS: expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part TableScan Union Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE @@ -2843,23 +2836,14 @@ STAGE PLANS: expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Reduce Operator Tree: - Extract - Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1001 Data size: 10883 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part Stage: Stage-8 Conditional Operator diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out index 06b11dc..86978f3 100644 --- a/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out +++ b/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out @@ -155,23 +155,14 @@ STAGE PLANS: expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) 
outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 174 Data size: 34830 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string), _col3 (type: string) - sort order: ++ - Map-reduce partition columns: _col2 (type: string), _col3 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 174 Data size: 34830 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) - Reduce Operator Tree: - Extract - Statistics: Num rows: 174 Data size: 34830 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 174 Data size: 34830 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.merge_dynamic_part + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.merge_dynamic_part Stage: Stage-7 Conditional Operator @@ -286,9 +277,9 @@ outputformat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat columns:struct columns { string key, string value} partitioned:true partitionColumns:struct partition_columns { string ds, string hr} -totalNumberFiles:4 +totalNumberFiles:6 totalFileSize:34830 -maxFileSize:11603 -minFileSize:5812 +maxFileSize:5812 +minFileSize:5791 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out index f14d6ca..7fd0bfc 100644 --- a/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out +++ b/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out @@ -158,23 +158,14 @@ STAGE PLANS: expressions: key (type: string), value (type: string), if(((key % 2) = 0), 'a1', 'b1') (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Reduce Operator Tree: - Extract - Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1000 Data size: 9624 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat - output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat - serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.merge_dynamic_part + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.merge_dynamic_part Stage: Stage-7 Conditional Operator diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out index 8a786c8..044b76f 
100644 --- a/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out +++ b/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out @@ -132,23 +132,14 @@ STAGE PLANS: expressions: key (type: string), value (type: string), if(((key % 100) = 0), 'a1', 'b1') (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 618 Data size: 5934 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 618 Data size: 5934 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Reduce Operator Tree: - Extract - Statistics: Num rows: 618 Data size: 5934 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 618 Data size: 5934 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat - output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat - serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.merge_dynamic_part + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.merge_dynamic_part Stage: Stage-7 Conditional Operator diff --git a/ql/src/test/results/clientpositive/metadata_only_queries.q.out b/ql/src/test/results/clientpositive/metadata_only_queries.q.out index e273570..f5b9bd8 100644 --- a/ql/src/test/results/clientpositive/metadata_only_queries.q.out +++ b/ql/src/test/results/clientpositive/metadata_only_queries.q.out @@ -340,9 +340,11 @@ STAGE PLANS: PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl #### A masked pattern was here #### POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl #### A masked pattern was here #### 9999 9999 1999.8 9999 9999 9999 9999 9999 PREHOOK: query: explain @@ -363,9 +365,11 @@ STAGE PLANS: PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl #### A masked pattern was here #### POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl #### A masked pattern was here #### 65536 65791 4294967296 4294967551 0.009999999776482582 99.9800033569336 0.01 50.0 PREHOOK: query: explain @@ -386,9 +390,11 @@ STAGE PLANS: PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl_part #### A masked pattern was here #### POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl_part #### A masked pattern was here #### 9489 9489 1897.8 9489 9489 9489 9489 9489 PREHOOK: query: explain @@ -409,9 +415,11 @@ STAGE PLANS: PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from 
stats_tbl_part PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl_part #### A masked pattern was here #### POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl_part #### A masked pattern was here #### 65536 65791 4294967296 4294967551 0.009999999776482582 99.9800033569336 0.01 50.0 PREHOOK: query: explain select count(ts) from stats_tbl_part diff --git a/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out b/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out index 664e065..5be958f 100644 --- a/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out +++ b/ql/src/test/results/clientpositive/metadata_only_queries_with_filters.q.out @@ -160,9 +160,11 @@ STAGE PLANS: PREHOOK: query: select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010 PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl_part #### A masked pattern was here #### POSTHOOK: query: select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010 POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl_part #### A masked pattern was here #### 2322 2322 2322 2322 2322 2322 2322 65791 4294967296 99.9800033569336 0.03 PREHOOK: query: explain @@ -183,16 +185,20 @@ STAGE PLANS: PREHOOK: query: select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010 PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl_part #### A masked pattern was here #### POSTHOOK: query: select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010 POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl_part #### A masked pattern was here #### 2219 2219 2219 4438 2219 2219 2219 2219 65791 4294967296 99.95999908447266 0.04 PREHOOK: query: select count(*) from stats_tbl_part PREHOOK: type: QUERY +PREHOOK: Input: default@stats_tbl_part #### A masked pattern was here #### POSTHOOK: query: select count(*) from stats_tbl_part POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_tbl_part #### A masked pattern was here #### 4541 PREHOOK: query: select count(*)/2 from stats_tbl_part diff --git a/ql/src/test/results/clientpositive/metadataonly1.q.out b/ql/src/test/results/clientpositive/metadataonly1.q.out index b90cb98..a7dc080 100644 --- a/ql/src/test/results/clientpositive/metadataonly1.q.out +++ b/ql/src/test/results/clientpositive/metadataonly1.q.out @@ -42,17 +42,17 @@ STAGE PLANS: aggregations: max(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: 
NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -124,20 +124,20 @@ STAGE PLANS: Map Operator Tree: TableScan alias: test1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE GatherStats: false Select Operator expressions: ds (type: string) outputColumnNames: ds - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: max(ds) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col0 (type: string) auto parallelism: false @@ -192,17 +192,17 @@ STAGE PLANS: aggregations: max(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -269,22 +269,22 @@ STAGE PLANS: Map Operator Tree: TableScan alias: test1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE GatherStats: false Select Operator expressions: ds (type: string) outputColumnNames: ds - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(DISTINCT ds) keys: ds (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE tag: -1 auto parallelism: false Path -> Alias: @@ -338,17 +338,17 @@ STAGE PLANS: aggregations: count(DISTINCT KEY._col0:0._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE 
File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -415,20 +415,20 @@ STAGE PLANS: Map Operator Tree: TableScan alias: test1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE GatherStats: false Select Operator expressions: ds (type: string) outputColumnNames: ds - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(ds) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col0 (type: bigint) auto parallelism: false @@ -483,17 +483,17 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -598,20 +598,20 @@ STAGE PLANS: Map Operator Tree: TableScan alias: test1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE GatherStats: false Select Operator expressions: ds (type: string) outputColumnNames: ds - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: max(ds) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col0 (type: string) auto parallelism: false @@ -708,15 +708,15 @@ STAGE PLANS: aggregations: max(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE Filter Operator 
isSamplingPred: false predicate: _col0 is not null (type: boolean) - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 @@ -744,18 +744,18 @@ STAGE PLANS: key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE tag: 1 auto parallelism: false TableScan alias: a2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE GatherStats: false Reduce Output Operator key expressions: ds (type: string) sort order: + Map-reduce partition columns: ds (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE tag: 0 auto parallelism: false Path -> Alias: @@ -1055,23 +1055,23 @@ STAGE PLANS: Map Operator Tree: TableScan alias: test2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE GatherStats: false Select Operator expressions: ds (type: string), hr (type: string) outputColumnNames: ds, hr - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(DISTINCT hr) keys: ds (type: string), hr (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE tag: -1 auto parallelism: false Path -> Alias: @@ -1213,17 +1213,17 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -1300,23 +1300,23 @@ STAGE PLANS: Map Operator Tree: TableScan alias: test2 - Statistics: Num rows: 0 Data 
size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE GatherStats: false Select Operator expressions: ds (type: string), hr (type: string) outputColumnNames: ds, hr - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(hr) keys: ds (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE tag: -1 value expressions: _col1 (type: bigint) auto parallelism: false @@ -1457,17 +1457,17 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -1537,20 +1537,20 @@ STAGE PLANS: Map Operator Tree: TableScan alias: test1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE GatherStats: false Select Operator expressions: ds (type: string) outputColumnNames: ds - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: max(ds) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col0 (type: string) auto parallelism: false @@ -1647,17 +1647,17 @@ STAGE PLANS: aggregations: max(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - 
Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -1783,23 +1783,23 @@ STAGE PLANS: Map Operator Tree: TableScan alias: test2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE GatherStats: false Select Operator expressions: ds (type: string), hr (type: string) outputColumnNames: ds, hr - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(DISTINCT hr) keys: ds (type: string), hr (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE tag: -1 auto parallelism: false Path -> Alias: @@ -2027,17 +2027,17 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git a/ql/src/test/results/clientpositive/nonmr_fetch.q.out b/ql/src/test/results/clientpositive/nonmr_fetch.q.out index 4d56901..87502cf 100644 --- a/ql/src/test/results/clientpositive/nonmr_fetch.q.out +++ b/ql/src/test/results/clientpositive/nonmr_fetch.q.out @@ -93,36 +93,24 @@ POSTHOOK: query: -- negative, select expression explain select key from src limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: 10 Processor Tree: - ListSink + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: select key from src limit 10 PREHOOK: type: QUERY @@ -1048,33 +1036,21 @@ POSTHOOK: query: -- negative, subq explain select a.* from (select * from src) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: -- negative, join explain select * from src join src src2 on src.key=src2.key diff --git a/ql/src/test/results/clientpositive/nullgroup3.q.out b/ql/src/test/results/clientpositive/nullgroup3.q.out index 81252e4..54657a8 100644 --- a/ql/src/test/results/clientpositive/nullgroup3.q.out +++ b/ql/src/test/results/clientpositive/nullgroup3.q.out @@ -40,31 +40,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tstparttbl - Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: NONE Select Operator - Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 
Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -131,31 +131,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tstparttbl2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -230,31 +230,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tstparttbl - Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: NONE Select Operator - Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: 
COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -329,31 +329,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tstparttbl2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/optimize_nullscan.q.out b/ql/src/test/results/clientpositive/optimize_nullscan.q.out index ed81cd6..fd77ad2 100644 --- a/ql/src/test/results/clientpositive/optimize_nullscan.q.out +++ b/ql/src/test/results/clientpositive/optimize_nullscan.q.out @@ -133,6 +133,93 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### PREHOOK: query: explain extended +select count(key) from srcpart where 1=2 group by key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended +select count(key) from srcpart where 1=2 group by key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + srcpart + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTION + count + TOK_TABLE_OR_COL + key + TOK_WHERE + = + 1 + 2 + TOK_GROUPBY + TOK_TABLE_OR_COL + key + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col1 (type: 
bigint) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(key) from srcpart where 1=2 group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +#### A masked pattern was here #### +POSTHOOK: query: select count(key) from srcpart where 1=2 group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +#### A masked pattern was here #### +PREHOOK: query: explain extended select * from (select key from src where false) a left outer join (select key from srcpart limit 0) b on a.key=b.key PREHOOK: type: QUERY POSTHOOK: query: explain extended diff --git a/ql/src/test/results/clientpositive/orc_analyze.q.out b/ql/src/test/results/clientpositive/orc_analyze.q.out index 6e22f97..e718b29 100644 --- a/ql/src/test/results/clientpositive/orc_analyze.q.out +++ b/ql/src/test/results/clientpositive/orc_analyze.q.out @@ -73,9 +73,11 @@ POSTHOOK: Lineage: orc_create_people.start_date SIMPLE [(orc_create_people_stagi POSTHOOK: Lineage: orc_create_people.state SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:state, type:string, comment:null), ] PREHOOK: query: analyze table orc_create_people compute statistics partialscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_create_people PREHOOK: Output: default@orc_create_people POSTHOOK: query: analyze table orc_create_people compute statistics partialscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_create_people POSTHOOK: Output: default@orc_create_people PREHOOK: query: desc formatted orc_create_people PREHOOK: type: DESCTABLE @@ -105,7 +107,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3123 + totalSize 3121 #### A masked pattern was here #### # Storage Information @@ -195,7 +197,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3123 + totalSize 3121 #### A masked pattern was here #### # Storage Information @@ -269,11 +271,13 @@ POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_cre POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ] PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_create_people PREHOOK: Output: default@orc_create_people PREHOOK: Output: default@orc_create_people@state=Ca PREHOOK: Output: default@orc_create_people@state=Or POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_create_people POSTHOOK: Output: 
default@orc_create_people POSTHOOK: Output: default@orc_create_people@state=Ca POSTHOOK: Output: default@orc_create_people@state=Or @@ -581,11 +585,13 @@ POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_cre POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ] PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_create_people PREHOOK: Output: default@orc_create_people PREHOOK: Output: default@orc_create_people@state=Ca PREHOOK: Output: default@orc_create_people@state=Or POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_create_people POSTHOOK: Output: default@orc_create_people POSTHOOK: Output: default@orc_create_people@state=Ca POSTHOOK: Output: default@orc_create_people@state=Or @@ -618,10 +624,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE true - numFiles 4 + numFiles 1 numRows 50 - rawDataSize 21980 - totalSize 4963 + rawDataSize 21950 + totalSize 2024 #### A masked pattern was here #### # Storage Information @@ -663,10 +669,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE true - numFiles 4 + numFiles 1 numRows 50 - rawDataSize 22048 - totalSize 5051 + rawDataSize 22050 + totalSize 2043 #### A masked pattern was here #### # Storage Information @@ -771,10 +777,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE true - numFiles 4 + numFiles 1 numRows 50 - rawDataSize 21980 - totalSize 4963 + rawDataSize 21950 + totalSize 2024 #### A masked pattern was here #### # Storage Information @@ -816,10 +822,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE true - numFiles 4 + numFiles 1 numRows 50 - rawDataSize 22048 - totalSize 5051 + rawDataSize 22050 + totalSize 2043 #### A masked pattern was here #### # Storage Information @@ -942,12 +948,14 @@ POSTHOOK: Input: default@orc_create_people POSTHOOK: Output: default@orc_create_people PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_create_people PREHOOK: Output: default@orc_create_people PREHOOK: Output: default@orc_create_people@state=Ca PREHOOK: Output: default@orc_create_people@state=OH PREHOOK: Output: default@orc_create_people@state=Or POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_create_people POSTHOOK: Output: default@orc_create_people POSTHOOK: Output: default@orc_create_people@state=Ca POSTHOOK: Output: default@orc_create_people@state=OH diff --git a/ql/src/test/results/clientpositive/orc_create.q.out b/ql/src/test/results/clientpositive/orc_create.q.out index 8fc88b3..e845331 100644 --- a/ql/src/test/results/clientpositive/orc_create.q.out +++ b/ql/src/test/results/clientpositive/orc_create.q.out @@ -416,9 +416,9 @@ POSTHOOK: query: SELECT strct from orc_create_complex POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_create_complex #### A masked pattern was here #### -{"a":"five","b":"six"} -{"a":"one","b":"two"} -{"a":"three","b":"four"} +{"A":"five","B":"six"} 
+{"A":"one","B":"two"} +{"A":"three","B":"four"} PREHOOK: query: CREATE TABLE orc_create_people_staging ( id int, first_name string, diff --git a/ql/src/test/results/clientpositive/orc_merge1.q.out b/ql/src/test/results/clientpositive/orc_merge1.q.out index 07bcdfa..f38709b 100644 --- a/ql/src/test/results/clientpositive/orc_merge1.q.out +++ b/ql/src/test/results/clientpositive/orc_merge1.q.out @@ -139,7 +139,7 @@ Partition Parameters: numFiles 2 numRows 242 rawDataSize 22748 - totalSize 1750 + totalSize 1747 #### A masked pattern was here #### # Storage Information @@ -295,7 +295,7 @@ Partition Parameters: numFiles 1 numRows 242 rawDataSize 22748 - totalSize 1335 + totalSize 1332 #### A masked pattern was here #### # Storage Information @@ -441,7 +441,7 @@ Partition Parameters: numFiles 1 numRows 242 rawDataSize 22748 - totalSize 1626 + totalSize 1623 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/orc_merge2.q.out b/ql/src/test/results/clientpositive/orc_merge2.q.out index 8f26ef0..b927b75e 100644 --- a/ql/src/test/results/clientpositive/orc_merge2.q.out +++ b/ql/src/test/results/clientpositive/orc_merge2.q.out @@ -26,8 +26,13 @@ POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one=' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 Stage-2 depends on stages: Stage-0 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 STAGE PLANS: Stage: Stage-1 @@ -40,23 +45,23 @@ STAGE PLANS: expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: int), _col3 (type: int) - sort order: ++ - Map-reduce partition columns: _col2 (type: int), _col3 (type: int) + File Output Operator + compressed: false Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int) - Reduce Operator Tree: - Extract - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge2a + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orcfile_merge2a + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### Stage: Stage-0 Move Operator @@ -75,6 +80,26 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator + Stage: Stage-3 + Merge File Operator + Map Operator Tree: + ORC File Merge Operator + merge level: stripe + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + + Stage: Stage-5 + Merge File Operator + Map Operator Tree: + ORC File Merge Operator + merge level: 
stripe + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + PREHOOK: query: INSERT OVERWRITE TABLE orcfile_merge2a PARTITION (one='1', two, three) SELECT key, value, PMOD(HASH(key), 10) as two, PMOD(HASH(value), 10) as three diff --git a/ql/src/test/results/clientpositive/orc_merge5.q.out b/ql/src/test/results/clientpositive/orc_merge5.q.out index a71edce..840dbf2 100644 --- a/ql/src/test/results/clientpositive/orc_merge5.q.out +++ b/ql/src/test/results/clientpositive/orc_merge5.q.out @@ -90,10 +90,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema PREHOOK: query: -- 3 files total analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: -- 3 files total analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -121,7 +123,7 @@ Table Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -243,10 +245,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema PREHOOK: query: -- 1 file after merging analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: -- 1 file after merging analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -274,7 +278,7 @@ Table Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information @@ -313,9 +317,11 @@ POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(nam POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -343,7 +349,7 @@ Table Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -403,10 +409,12 @@ POSTHOOK: Output: default@orc_merge5b PREHOOK: query: -- 1 file after merging analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: -- 1 file after merging analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -434,7 +442,7 @@ Table Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/orc_merge6.q.out 
b/ql/src/test/results/clientpositive/orc_merge6.q.out index 69cf6f4..5f51320 100644 --- a/ql/src/test/results/clientpositive/orc_merge6.q.out +++ b/ql/src/test/results/clientpositive/orc_merge6.q.out @@ -108,19 +108,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_ PREHOOK: query: -- 3 files total analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 POSTHOOK: query: -- 3 files total analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) @@ -155,7 +159,7 @@ Partition Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -200,7 +204,7 @@ Partition Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -353,19 +357,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_ PREHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 POSTHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) @@ -400,7 +408,7 @@ Partition Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information @@ -445,7 +453,7 @@ Partition Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information @@ -512,18 +520,22 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merg POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, 
type:bigint, comment:null), ] PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) @@ -558,7 +570,7 @@ Partition Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -603,7 +615,7 @@ Partition Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -689,19 +701,23 @@ POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 PREHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 POSTHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) @@ -736,7 +752,7 @@ Partition Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information @@ -781,7 +797,7 @@ Partition Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/orc_merge7.q.out b/ql/src/test/results/clientpositive/orc_merge7.q.out index f6058fe..2f506cb 100644 --- a/ql/src/test/results/clientpositive/orc_merge7.q.out +++ b/ql/src/test/results/clientpositive/orc_merge7.q.out @@ -141,19 +141,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc PREHOOK: query: -- 3 files total analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: 
default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: -- 3 files total analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -187,7 +191,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 255 - totalSize 521 + totalSize 513 #### A masked pattern was here #### # Storage Information @@ -231,7 +235,7 @@ Partition Parameters: numFiles 2 numRows 2 rawDataSize 510 - totalSize 1058 + totalSize 1044 #### A masked pattern was here #### # Storage Information @@ -418,19 +422,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc PREHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -464,7 +472,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 255 - totalSize 521 + totalSize 513 #### A masked pattern was here #### # Storage Information @@ -508,7 +516,7 @@ Partition Parameters: numFiles 1 numRows 2 rawDataSize 510 - totalSize 852 + totalSize 838 #### A masked pattern was here #### # Storage Information @@ -614,18 +622,22 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_mer POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: 
default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -659,7 +671,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 255 - totalSize 521 + totalSize 513 #### A masked pattern was here #### # Storage Information @@ -703,7 +715,7 @@ Partition Parameters: numFiles 2 numRows 2 rawDataSize 510 - totalSize 1058 + totalSize 1044 #### A masked pattern was here #### # Storage Information @@ -791,19 +803,23 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -837,7 +853,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 255 - totalSize 521 + totalSize 513 #### A masked pattern was here #### # Storage Information @@ -881,7 +897,7 @@ Partition Parameters: numFiles 1 numRows 2 rawDataSize 510 - totalSize 852 + totalSize 838 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out index e2d634b..cc81ead 100644 --- a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out +++ b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out @@ -141,10 +141,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema PREHOOK: query: -- 5 files total analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: -- 5 files total analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -172,7 +174,7 @@ Table Parameters: numFiles 5 numRows 15 rawDataSize 3825 - totalSize 2862 + totalSize 2877 #### A masked pattern was here #### # Storage Information @@ -219,10 +221,12 @@ POSTHOOK: Output: default@orc_merge5b PREHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind analyze table orc_merge5b 
compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -250,7 +254,7 @@ Table Parameters: numFiles 3 numRows 15 rawDataSize 3825 - totalSize 2325 + totalSize 2340 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out index c32fbf6..c944eb6 100644 --- a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out +++ b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out @@ -200,18 +200,22 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_mer POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -245,7 +249,7 @@ Partition Parameters: numFiles 4 numRows 4 rawDataSize 1020 - totalSize 2092 + totalSize 2060 #### A masked pattern was here #### # Storage Information @@ -289,7 +293,7 @@ Partition Parameters: numFiles 4 numRows 8 rawDataSize 2040 - totalSize 2204 + totalSize 2188 #### A masked pattern was here #### # Storage Information @@ -385,18 +389,22 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -430,7 +438,7 @@ Partition Parameters: numFiles 3 numRows 4 rawDataSize 1020 - totalSize 1851 + totalSize 1819 #### A masked pattern was here #### # Storage Information @@ -474,7 +482,7 @@ Partition Parameters: numFiles 3 numRows 8 rawDataSize 
2040 - totalSize 1944 + totalSize 1928 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/parquet_create.q.out b/ql/src/test/results/clientpositive/parquet_create.q.out index 58ea1f2..2a94693 100644 --- a/ql/src/test/results/clientpositive/parquet_create.q.out +++ b/ql/src/test/results/clientpositive/parquet_create.q.out @@ -181,6 +181,6 @@ POSTHOOK: query: SELECT strct from parquet_create POSTHOOK: type: QUERY POSTHOOK: Input: default@parquet_create #### A masked pattern was here #### -{"a":"one","b":"two"} -{"a":"three","b":"four"} -{"a":"five","b":"six"} +{"A":"one","B":"two"} +{"A":"three","B":"four"} +{"A":"five","B":"six"} diff --git a/ql/src/test/results/clientpositive/parquet_types.q.out b/ql/src/test/results/clientpositive/parquet_types.q.out index 803a826..275897c 100644 --- a/ql/src/test/results/clientpositive/parquet_types.q.out +++ b/ql/src/test/results/clientpositive/parquet_types.q.out @@ -15,9 +15,14 @@ PREHOOK: query: CREATE TABLE parquet_types_staging ( cstring1 string, t timestamp, cchar char(5), - cvarchar varchar(10) + cvarchar varchar(10), + m1 map<string,string>, + l1 array<int>, + st1 struct<c1:int,c2:string> ) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +COLLECTION ITEMS TERMINATED BY ',' +MAP KEYS TERMINATED BY ':' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_types_staging @@ -30,9 +35,14 @@ POSTHOOK: query: CREATE TABLE parquet_types_staging ( cstring1 string, t timestamp, cchar char(5), - cvarchar varchar(10) + cvarchar varchar(10), + m1 map<string,string>, + l1 array<int>, + st1 struct<c1:int,c2:string> ) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +COLLECTION ITEMS TERMINATED BY ',' +MAP KEYS TERMINATED BY ':' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@parquet_types_staging @@ -45,7 +55,10 @@ PREHOOK: query: CREATE TABLE parquet_types ( cstring1 string, t timestamp, cchar char(5), - cvarchar varchar(10) + cvarchar varchar(10), + m1 map<string,string>, + l1 array<int>, + st1 struct<c1:int,c2:string> ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -59,7 +72,10 @@ POSTHOOK: query: CREATE TABLE parquet_types ( cstring1 string, t timestamp, cchar char(5), - cvarchar varchar(10) + cvarchar varchar(10), + m1 map<string,string>, + l1 array<int>, + st1 struct<c1:int,c2:string> ) STORED AS PARQUET POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -88,6 +104,9 @@ POSTHOOK: Lineage: parquet_types.csmallint SIMPLE [(parquet_types_staging)parque POSTHOOK: Lineage: parquet_types.cstring1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cstring1, type:string, comment:null), ] POSTHOOK: Lineage: parquet_types.ctinyint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] POSTHOOK: Lineage: parquet_types.cvarchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cvarchar, type:varchar(10), comment:null), ] +POSTHOOK: Lineage: parquet_types.l1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:l1, type:array<int>, comment:null), ] +POSTHOOK: Lineage: parquet_types.m1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:m1, type:map<string,string>, comment:null), ] +POSTHOOK: Lineage: parquet_types.st1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:st1, type:struct<c1:int,c2:string>, comment:null), ] POSTHOOK: Lineage: parquet_types.t SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:t, type:timestamp, comment:null), ] PREHOOK: query: SELECT * FROM parquet_types PREHOOK: type: QUERY @@
-97,27 +116,56 @@ POSTHOOK: query: SELECT * FROM parquet_types POSTHOOK: type: QUERY POSTHOOK: Input: default@parquet_types #### A masked pattern was here #### -100 1 1 1.0 0.0 abc 2011-01-01 01:01:01.111111111 a a -101 2 2 1.1 0.3 def 2012-02-02 02:02:02.222222222 ab ab -102 3 3 1.2 0.6 ghi 2013-03-03 03:03:03.333333333 abc abc -103 1 4 1.3 0.9 jkl 2014-04-04 04:04:04.444444444 abcd abcd -104 2 5 1.4 1.2 mno 2015-05-05 05:05:05.555555555 abcde abcde -105 3 1 1.0 1.5 pqr 2016-06-06 06:06:06.666666666 abcde abcdef -106 1 2 1.1 1.8 stu 2017-07-07 07:07:07.777777777 abcde abcdefg -107 2 3 1.2 2.1 vwx 2018-08-08 08:08:08.888888888 bcdef abcdefgh -108 3 4 1.3 2.4 yza 2019-09-09 09:09:09.999999999 cdefg abcdefghij -109 1 5 1.4 2.7 bcd 2020-10-10 10:10:10.101010101 klmno abcdedef -110 2 1 1.0 3.0 efg 2021-11-11 11:11:11.111111111 pqrst abcdede -111 3 2 1.1 3.3 hij 2022-12-12 12:12:12.121212121 nopqr abcded -112 1 3 1.2 3.6 klm 2023-01-02 13:13:13.131313131 opqrs abcdd -113 2 4 1.3 3.9 nop 2024-02-02 14:14:14.141414141 pqrst abc -114 3 5 1.4 4.2 qrs 2025-03-03 15:15:15.151515151 qrstu b -115 1 1 1.0 4.5 tuv 2026-04-04 16:16:16.161616161 rstuv abcded -116 2 2 1.1 4.8 wxy 2027-05-05 17:17:17.171717171 stuvw abcded -117 3 3 1.2 5.1 zab 2028-06-06 18:18:18.181818181 tuvwx abcded -118 1 4 1.3 5.4 cde 2029-07-07 19:19:19.191919191 uvwzy abcdede -119 2 5 1.4 5.7 fgh 2030-08-08 20:20:20.202020202 vwxyz abcdede -120 3 1 1.0 6.0 ijk 2031-09-09 21:21:21.212121212 wxyza abcde +100 1 1 1.0 0.0 abc 2011-01-01 01:01:01.111111111 a a {"k1":"v1"} [101,200] {"c1":10,"c2":"a"} +101 2 2 1.1 0.3 def 2012-02-02 02:02:02.222222222 ab ab {"k2":"v2"} [102,200] {"c1":10,"c2":"d"} +102 3 3 1.2 0.6 ghi 2013-03-03 03:03:03.333333333 abc abc {"k3":"v3"} [103,200] {"c1":10,"c2":"g"} +103 1 4 1.3 0.9 jkl 2014-04-04 04:04:04.444444444 abcd abcd {"k4":"v4"} [104,200] {"c1":10,"c2":"j"} +104 2 5 1.4 1.2 mno 2015-05-05 05:05:05.555555555 abcde abcde {"k5":"v5"} [105,200] {"c1":10,"c2":"m"} +105 3 1 1.0 1.5 pqr 2016-06-06 06:06:06.666666666 abcde abcdef {"k6":"v6"} [106,200] {"c1":10,"c2":"p"} +106 1 2 1.1 1.8 stu 2017-07-07 07:07:07.777777777 abcde abcdefg {"k7":"v7"} [107,200] {"c1":10,"c2":"s"} +107 2 3 1.2 2.1 vwx 2018-08-08 08:08:08.888888888 bcdef abcdefgh {"k8":"v8"} [108,200] {"c1":10,"c2":"v"} +108 3 4 1.3 2.4 yza 2019-09-09 09:09:09.999999999 cdefg abcdefghij {"k9":"v9"} [109,200] {"c1":10,"c2":"y"} +109 1 5 1.4 2.7 bcd 2020-10-10 10:10:10.101010101 klmno abcdedef {"k10":"v10"} [110,200] {"c1":10,"c2":"b"} +110 2 1 1.0 3.0 efg 2021-11-11 11:11:11.111111111 pqrst abcdede {"k11":"v11"} [111,200] {"c1":10,"c2":"e"} +111 3 2 1.1 3.3 hij 2022-12-12 12:12:12.121212121 nopqr abcded {"k12":"v12"} [112,200] {"c1":10,"c2":"h"} +112 1 3 1.2 3.6 klm 2023-01-02 13:13:13.131313131 opqrs abcdd {"k13":"v13"} [113,200] {"c1":10,"c2":"k"} +113 2 4 1.3 3.9 nop 2024-02-02 14:14:14.141414141 pqrst abc {"k14":"v14"} [114,200] {"c1":10,"c2":"n"} +114 3 5 1.4 4.2 qrs 2025-03-03 15:15:15.151515151 qrstu b {"k15":"v15"} [115,200] {"c1":10,"c2":"q"} +115 1 1 1.0 4.5 qrs 2026-04-04 16:16:16.161616161 rstuv abcded {"k16":"v16"} [116,200] {"c1":10,"c2":"q"} +116 2 2 1.1 4.8 wxy 2027-05-05 17:17:17.171717171 stuvw abcded {"k17":"v17"} [117,200] {"c1":10,"c2":"w"} +117 3 3 1.2 5.1 zab 2028-06-06 18:18:18.181818181 tuvwx abcded {"k18":"v18"} [118,200] {"c1":10,"c2":"z"} +118 1 4 1.3 5.4 cde 2029-07-07 19:19:19.191919191 uvwzy abcdede {"k19":"v19"} [119,200] {"c1":10,"c2":"c"} +119 2 5 1.4 5.7 fgh 2030-08-08 20:20:20.202020202 vwxyz abcdede {"k20":"v20"} 
[120,200] {"c1":10,"c2":"f"} +120 3 1 1.0 6.0 ijk 2031-09-09 21:21:21.212121212 wxyza abcde {"k21":"v21"} [121,200] {"c1":10,"c2":"i"} +PREHOOK: query: SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types +PREHOOK: type: QUERY +PREHOOK: Input: default@parquet_types +#### A masked pattern was here #### +POSTHOOK: query: SELECT cchar, LENGTH(cchar), cvarchar, LENGTH(cvarchar) FROM parquet_types +POSTHOOK: type: QUERY +POSTHOOK: Input: default@parquet_types +#### A masked pattern was here #### +a 1 a 3 +ab 2 ab 3 +abc 3 abc 3 +abcd 4 abcd 4 +abcde 5 abcde 5 +abcde 5 abcdef 6 +abcde 5 abcdefg 7 +bcdef 5 abcdefgh 8 +cdefg 5 abcdefghij 10 +klmno 5 abcdedef 8 +pqrst 5 abcdede 7 +nopqr 5 abcded 6 +opqrs 5 abcdd 5 +pqrst 5 abc 3 +qrstu 5 b 1 +rstuv 5 abcded 6 +stuvw 5 abcded 6 +tuvwx 5 abcded 6 +uvwzy 5 abcdede 7 +vwxyz 5 abcdede 7 +wxyza 5 abcde 5 PREHOOK: query: SELECT ctinyint, MAX(cint), MIN(csmallint), diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out index 2537f24..b9cdde1 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out @@ -1,8 +1,12 @@ -PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) +PREHOOK: query: -- SORT_BEFORE_DIFF + +create table partition_test_partitioned(key string, value string) partitioned by (dt string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) +POSTHOOK: query: -- SORT_BEFORE_DIFF + +create table partition_test_partitioned(key string, value string) partitioned by (dt string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@partition_test_partitioned @@ -57,33 +61,21 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select *, BLOCK__OFFSET__INSIDE__FILE from partition_test_partitioned where dt >=100 and dt <= 102 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: partition_test_partitioned - Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string), dt (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: partition_test_partitioned + Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string), dt (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 75 Data size: 548 Basic stats: COMPLETE Column stats: NONE + 
ListSink PREHOOK: query: select * from partition_test_partitioned where dt >=100 and dt <= 102 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/ppd_constant_where.q.out b/ql/src/test/results/clientpositive/ppd_constant_where.q.out index 419c5f7..7b6d1a2 100644 --- a/ql/src/test/results/clientpositive/ppd_constant_where.q.out +++ b/ql/src/test/results/clientpositive/ppd_constant_where.q.out @@ -16,31 +16,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out index f51b5a3..e8822b1 100644 --- a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out +++ b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out @@ -145,7 +145,7 @@ STAGE PLANS: predicate: (_col7 = 3) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col6 (type: int), _col7 (type: int) + expressions: _col0 (type: int), _col6 (type: int), 3 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppr_pushdown3.q.out b/ql/src/test/results/clientpositive/ppr_pushdown3.q.out index 1beca6c..7b36b95 100644 --- a/ql/src/test/results/clientpositive/ppr_pushdown3.q.out +++ b/ql/src/test/results/clientpositive/ppr_pushdown3.q.out @@ -2132,33 +2132,21 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select key from srcpart POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - Statistics: Num rows: 2000 
Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: select key from srcpart PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out b/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out index 0e7c4af..f7da06c 100644 --- a/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out +++ b/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx.q.out @@ -306,6 +306,7 @@ group by l_shipdate order by l_shipdate PREHOOK: type: QUERY PREHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__ +PREHOOK: Input: default@lineitem #### A masked pattern was here #### POSTHOOK: query: select l_shipdate, count(l_shipdate) from lineitem @@ -313,6 +314,7 @@ group by l_shipdate order by l_shipdate POSTHOOK: type: QUERY POSTHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__ +POSTHOOK: Input: default@lineitem #### A masked pattern was here #### 1992-04-27 1 1992-07-02 1 @@ -654,6 +656,7 @@ group by year(l_shipdate), month(l_shipdate) order by year, month PREHOOK: type: QUERY PREHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__ +PREHOOK: Input: default@lineitem #### A masked pattern was here #### POSTHOOK: query: select year(l_shipdate) as year, month(l_shipdate) as month, @@ -663,6 +666,7 @@ group by year(l_shipdate), month(l_shipdate) order by year, month POSTHOOK: type: QUERY POSTHOOK: Input: default@default__lineitem_lineitem_lshipdate_idx__ +POSTHOOK: Input: default@lineitem #### A masked pattern was here #### 1992 4 1 1992 7 3 @@ -2628,10 +2632,12 @@ STAGE PLANS: PREHOOK: query: select key, count(key) from tbl group by key order by key PREHOOK: type: QUERY PREHOOK: Input: default@default__tbl_tbl_key_idx__ +PREHOOK: Input: default@tbl #### A masked pattern was here #### POSTHOOK: query: select key, count(key) from tbl group by key order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@default__tbl_tbl_key_idx__ +POSTHOOK: Input: default@tbl #### A masked pattern was here #### 1 1 2 3 diff --git a/ql/src/test/results/clientpositive/query_properties.q.out b/ql/src/test/results/clientpositive/query_properties.q.out index 47f8d8c..5684f0d 100644 --- a/ql/src/test/results/clientpositive/query_properties.q.out +++ b/ql/src/test/results/clientpositive/query_properties.q.out @@ -1,5 +1,6 @@ PREHOOK: query: select * from src a join src b on a.key = b.key limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### Has Join: true Has Group By: false @@ -11,6 +12,7 @@ Has Distribute By: false Has Cluster By: false PREHOOK: query: select * from src group by src.key, src.value limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### 
A masked pattern was here #### Has Join: false Has Group By: true @@ -22,6 +24,7 @@ Has Distribute By: false Has Cluster By: false PREHOOK: query: select * from src order by src.key limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### Has Join: false Has Group By: false @@ -33,6 +36,7 @@ Has Distribute By: false Has Cluster By: false PREHOOK: query: select * from src sort by src.key limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### Has Join: false Has Group By: false @@ -44,6 +48,7 @@ Has Distribute By: false Has Cluster By: false PREHOOK: query: select a.key, sum(b.value) from src a join src b on a.key = b.key group by a.key limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### Has Join: true Has Group By: true @@ -55,6 +60,7 @@ Has Distribute By: false Has Cluster By: false PREHOOK: query: select transform(*) using 'cat' from src limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### Has Join: false Has Group By: false @@ -66,6 +72,7 @@ Has Distribute By: false Has Cluster By: false PREHOOK: query: select * from src distribute by src.key limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### Has Join: false Has Group By: false @@ -77,6 +84,7 @@ Has Distribute By: true Has Cluster By: false PREHOOK: query: select * from src cluster by src.key limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### Has Join: false Has Group By: false @@ -88,6 +96,7 @@ Has Distribute By: false Has Cluster By: true PREHOOK: query: select key, sum(value) from (select a.key as key, b.value as value from src a join src b on a.key = b.key) c group by key limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### Has Join: true Has Group By: true @@ -99,6 +108,7 @@ Has Distribute By: false Has Cluster By: false PREHOOK: query: select * from src a join src b on a.key = b.key order by a.key limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### Has Join: true Has Group By: false @@ -110,6 +120,7 @@ Has Distribute By: false Has Cluster By: false PREHOOK: query: select * from src a join src b on a.key = b.key distribute by a.key sort by a.key, b.value limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### Has Join: true Has Group By: false diff --git a/ql/src/test/results/clientpositive/quote1.q.out b/ql/src/test/results/clientpositive/quote1.q.out index a17bac9..f111265 100644 --- a/ql/src/test/results/clientpositive/quote1.q.out +++ b/ql/src/test/results/clientpositive/quote1.q.out @@ -108,18 +108,24 @@ POSTHOOK: query: EXPLAIN SELECT `int`.`location`, `int`.`type`, `int`.`table` FROM dest1 `int` WHERE `int`.`table` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: int + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: (table = '2008-04-08') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: location (type: int), type (type: string), '2008-04-08' (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: 
Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + ListSink PREHOOK: query: FROM src INSERT OVERWRITE TABLE dest1 PARTITION(`table`='2008-04-08') SELECT src.key as `partition`, src.value as `from` WHERE src.key >= 200 and src.key < 300 diff --git a/ql/src/test/results/clientpositive/quote2.q.out b/ql/src/test/results/clientpositive/quote2.q.out index 129e090..086960a 100644 --- a/ql/src/test/results/clientpositive/quote2.q.out +++ b/ql/src/test/results/clientpositive/quote2.q.out @@ -8,6 +8,7 @@ SELECT 'abc\\\\\'', "abc\\\\\"", 'abc\\\\\\', "abc\\\\\\", 'abc""""\\', "abc''''\\", + 'mysql_%\\_\%', 'mysql\\\_\\\\\%', "awk '{print NR\"\\t\"$0}'", 'tab\ttab', "tab\ttab" FROM src @@ -23,6 +24,7 @@ SELECT 'abc\\\\\'', "abc\\\\\"", 'abc\\\\\\', "abc\\\\\\", 'abc""""\\', "abc''''\\", + 'mysql_%\\_\%', 'mysql\\\_\\\\\%', "awk '{print NR\"\\t\"$0}'", 'tab\ttab', "tab\ttab" FROM src @@ -40,12 +42,12 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: 'abc' (type: string), 'abc' (type: string), 'abc'' (type: string), 'abc"' (type: string), 'abc\' (type: string), 'abc\' (type: string), 'abc\'' (type: string), 'abc\"' (type: string), 'abc\\' (type: string), 'abc\\' (type: string), 'abc\\'' (type: string), 'abc\\"' (type: string), 'abc\\\' (type: string), 'abc\\\' (type: string), 'abc""""\' (type: string), 'abc''''\' (type: string), 'awk '{print NR"\t"$0}'' (type: string), 'tab tab' (type: string), 'tab tab' (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18 - Statistics: Num rows: 500 Data size: 857000 Basic stats: COMPLETE Column stats: COMPLETE + expressions: 'abc' (type: string), 'abc' (type: string), 'abc'' (type: string), 'abc"' (type: string), 'abc\' (type: string), 'abc\' (type: string), 'abc\'' (type: string), 'abc\"' (type: string), 'abc\\' (type: string), 'abc\\' (type: string), 'abc\\'' (type: string), 'abc\\"' (type: string), 'abc\\\' (type: string), 'abc\\\' (type: string), 'abc""""\' (type: string), 'abc''''\' (type: string), 'mysql_%\_\%' (type: string), 'mysql\\_\\\%' (type: string), 'awk '{print NR"\t"$0}'' (type: string), 'tab tab' (type: string), 'tab tab' (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 + Statistics: Num rows: 500 Data size: 952500 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 1714 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 1905 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: SELECT @@ -57,6 +59,7 @@ PREHOOK: query: SELECT 'abc\\\\\'', "abc\\\\\"", 'abc\\\\\\', "abc\\\\\\", 'abc""""\\', "abc''''\\", + 'mysql_%\\_\%', 'mysql\\\_\\\\\%', "awk '{print NR\"\\t\"$0}'", 'tab\ttab', "tab\ttab" FROM src @@ -73,6 +76,7 @@ POSTHOOK: query: SELECT 'abc\\\\\'', "abc\\\\\"", 'abc\\\\\\', "abc\\\\\\", 'abc""""\\', "abc''''\\", + 'mysql_%\\_\%', 'mysql\\\_\\\\\%', "awk '{print NR\"\\t\"$0}'", 'tab\ttab', "tab\ttab" FROM src @@ -80,4 +84,4 @@ LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -abc abc abc' abc" abc\ abc\ abc\' abc\" abc\\ abc\\ abc\\' abc\\" abc\\\ abc\\\ abc""""\ abc''''\ awk '{print NR"\t"$0}' tab tab tab tab +abc abc abc' abc" abc\ 
abc\ abc\' abc\" abc\\ abc\\ abc\\' abc\\" abc\\\ abc\\\ abc""""\ abc''''\ mysql_%\_\% mysql\\_\\\% awk '{print NR"\t"$0}' tab tab tab tab diff --git a/ql/src/test/results/clientpositive/quotedid_basic.q.out b/ql/src/test/results/clientpositive/quotedid_basic.q.out index 612a46e..46ec84b 100644 --- a/ql/src/test/results/clientpositive/quotedid_basic.q.out +++ b/ql/src/test/results/clientpositive/quotedid_basic.q.out @@ -30,33 +30,21 @@ PREHOOK: type: QUERY POSTHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - Select Operator - expressions: x+1 (type: string), y&y (type: string), !@#$%^&*()_q (type: string) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: t1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: x+1 (type: string), y&y (type: string), !@#$%^&*()_q (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + ListSink PREHOOK: query: explain select `x+1`, `y&y`, `!@#$%^&*()_q` from t1 where `!@#$%^&*()_q` = '1' PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/regex_col.q.out b/ql/src/test/results/clientpositive/regex_col.q.out index c77ccf1..c42ba66 100644 --- a/ql/src/test/results/clientpositive/regex_col.q.out +++ b/ql/src/test/results/clientpositive/regex_col.q.out @@ -28,33 +28,21 @@ POSTHOOK: query: EXPLAIN SELECT `..` FROM srcpart POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: ds (type: string), hr (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string), hr (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: EXPLAIN SELECT srcpart.`..` FROM srcpart @@ -63,33 +51,21 @@ POSTHOOK: query: EXPLAIN SELECT 
srcpart.`..` FROM srcpart POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: ds (type: string), hr (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2000 Data size: 736000 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string), hr (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: EXPLAIN SELECT `..` FROM srcpart a JOIN srcpart b @@ -289,33 +265,21 @@ POSTHOOK: query: EXPLAIN SELECT `.e.` FROM srcpart POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: EXPLAIN SELECT `d.*` FROM srcpart @@ -324,33 +288,21 @@ POSTHOOK: query: EXPLAIN SELECT `d.*` FROM srcpart POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: ds (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: 
-1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: srcpart
+          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: ds (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN SELECT `(ds)?+.+` FROM srcpart
@@ -359,33 +311,21 @@ POSTHOOK: query: EXPLAIN SELECT `(ds)?+.+` FROM srcpart
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: srcpart
-            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string), hr (type: string)
-              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: srcpart
+          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: key (type: string), value (type: string), hr (type: string)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: EXPLAIN SELECT `(ds|hr)?+.+` FROM srcpart ORDER BY key, value LIMIT 10
diff --git a/ql/src/test/results/clientpositive/schemeAuthority.q.out b/ql/src/test/results/clientpositive/schemeAuthority.q.out
index 1daf7c7..9a6019c 100644
--- a/ql/src/test/results/clientpositive/schemeAuthority.q.out
+++ b/ql/src/test/results/clientpositive/schemeAuthority.q.out
@@ -61,8 +61,8 @@ POSTHOOK: Input: default@dynpart
 POSTHOOK: Input: default@dynpart@value=0
 POSTHOOK: Input: default@dynpart@value=1
 #### A masked pattern was here ####
-20
 10
+20
 PREHOOK: query: select key from src where (key = 10) order by key
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
diff --git a/ql/src/test/results/clientpositive/schemeAuthority2.q.out b/ql/src/test/results/clientpositive/schemeAuthority2.q.out
index ad67a4d..60913f2 100644
--- a/ql/src/test/results/clientpositive/schemeAuthority2.q.out
+++ b/ql/src/test/results/clientpositive/schemeAuthority2.q.out
@@ -48,6 +48,6 @@ POSTHOOK: Input: default@dynpart
 POSTHOOK: Input: default@dynpart@value=0/value2=clusterA
 POSTHOOK: Input: default@dynpart@value=0/value2=clusterB
 #### A masked pattern was here ####
-clusterB 20
 clusterA 10
+clusterB 20
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/select_dummy_source.q.out b/ql/src/test/results/clientpositive/select_dummy_source.q.out
index 651ce1d..08311f0 100644
--- a/ql/src/test/results/clientpositive/select_dummy_source.q.out
+++ b/ql/src/test/results/clientpositive/select_dummy_source.q.out
@@ -5,34 +5,22 @@ POSTHOOK: query: explain
 select 'a', 100
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: _dummy_table
-            Row Limit Per Split: 1
-            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              expressions: 'a' (type: string), 100 (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
      limit: -1
      Processor Tree:
-        ListSink
+        TableScan
+          alias: _dummy_table
+          Row Limit Per Split: 1
+          Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+          Select Operator
+            expressions: 'a' (type: string), 100 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+            ListSink
 
 PREHOOK: query: select 'a', 100
 PREHOOK: type: QUERY
@@ -52,34 +40,22 @@ explain
 select 1 + 1
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: _dummy_table
-            Row Limit Per Split: 1
-            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              expressions: 2 (type: int)
-              outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: _dummy_table
+          Row Limit Per Split: 1
+          Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+          Select Operator
+            expressions: 2 (type: int)
+            outputColumnNames: _col0
+            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+            ListSink
 
 PREHOOK: query: select 1 + 1
 PREHOOK: type: QUERY
@@ -265,34 +241,22 @@ explain
 select 2 + 3,x from (select 1 + 2 x) X
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: _dummy_table
-            Row Limit Per Split: 1
-            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-            Select Operator
-              expressions: 5 (type: int), (1 + 2) (type: int)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: _dummy_table
+          Row Limit Per Split: 1
+          Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+          Select Operator
+            expressions: 5 (type: int), 3 (type: int)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE
+            ListSink
 
 PREHOOK: query: select 2 + 3,x from (select 1 + 2 x) X
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/serde_user_properties.q.out b/ql/src/test/results/clientpositive/serde_user_properties.q.out
index 5839880..be5f59b 100644
--- a/ql/src/test/results/clientpositive/serde_user_properties.q.out
+++ b/ql/src/test/results/clientpositive/serde_user_properties.q.out
@@ -24,97 +24,22 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0
-                      columns.types string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments defaultdefault
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments defaultdefault
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: explain extended select a.key from src a
 PREHOOK: type: QUERY
@@ -141,97 +66,22 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0
-                      columns.types string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments defaultdefault
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments defaultdefault
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [a]
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: a
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: explain extended select a.key from src tablesample(1 percent) a
 PREHOOK: type: QUERY
@@ -384,99 +234,22 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0
-                      columns.types string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments defaultdefault
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-              user.defined.key some.value
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments defaultdefault
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-                user.defined.key some.value
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [src]
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: src
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: explain extended select key from src ('user.defined.key'='some.value') tablesample(1 percent)
 PREHOOK: type: QUERY
@@ -636,99 +409,22 @@ TOK_QUERY
 
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: a
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            GatherStats: false
-            Select Operator
-              expressions: key (type: string)
-              outputColumnNames: _col0
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                GlobalTableId: 0
-#### A masked pattern was here ####
-                NumFilesPerFileSink: 1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    properties:
-                      columns _col0
-                      columns.types string
-                      escape.delim \
-                      hive.serialization.extend.nesting.levels true
-                      serialization.format 1
-                      serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                TotalFiles: 1
-                GatherStats: false
-                MultiFileSpray: false
-      Path -> Alias:
-#### A masked pattern was here ####
-      Path -> Partition:
-#### A masked pattern was here ####
-          Partition
-            base file name: src
-            input format: org.apache.hadoop.mapred.TextInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-            properties:
-              COLUMN_STATS_ACCURATE true
-              bucket_count -1
-              columns key,value
-              columns.comments defaultdefault
-              columns.types string:string
-#### A masked pattern was here ####
-              name default.src
-              numFiles 1
-              numRows 500
-              rawDataSize 5312
-              serialization.ddl struct src { string key, string value}
-              serialization.format 1
-              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              totalSize 5812
-#### A masked pattern was here ####
-              user.defined.key some.value
-            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-          
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              properties:
-                COLUMN_STATS_ACCURATE true
-                bucket_count -1
-                columns key,value
-                columns.comments defaultdefault
-                columns.types string:string
-#### A masked pattern was here ####
-                name default.src
-                numFiles 1
-                numRows 500
-                rawDataSize 5312
-                serialization.ddl struct src { string key, string value}
-                serialization.format 1
-                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                totalSize 5812
-#### A masked pattern was here ####
-                user.defined.key some.value
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.src
-            name: default.src
-      Truncated Path -> Alias:
-        /src [a]
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: a
+          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+          GatherStats: false
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            ListSink
 
 PREHOOK: query: explain extended select a.key from src ('user.defined.key'='some.value') tablesample(1 percent) a
 PREHOOK: type: QUERY
diff --git a/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
index 9ec8fab..3e64d36 100644
--- a/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
+++ b/ql/src/test/results/clientpositive/spark/alter_merge_stats_orc.q.out
@@ -68,9 +68,11 @@ value string
 #### A masked pattern was here ####
 PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_stat
 PREHOOK: Output: default@src_orc_merge_test_stat
 POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_stat
 POSTHOOK: Output: default@src_orc_merge_test_stat
 PREHOOK: query: desc formatted src_orc_merge_test_stat
 PREHOOK: type: DESCTABLE
@@ -118,9 +120,11 @@ POSTHOOK: Input: default@src_orc_merge_test_stat
 POSTHOOK: Output: default@src_orc_merge_test_stat
 PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_stat
 PREHOOK: Output: default@src_orc_merge_test_stat
 POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_stat
 POSTHOOK: Output: default@src_orc_merge_test_stat
 PREHOOK: query: desc formatted src_orc_merge_test_stat
 PREHOOK: type: DESCTABLE
@@ -249,8 +253,6 @@ Protect Mode: None
 Partition Parameters:
 	COLUMN_STATS_ACCURATE	true
 	numFiles	3
-	numRows	-1
-	rawDataSize	-1
 	totalSize	7488
 #### A masked pattern was here ####
@@ -266,10 +268,12 @@ Storage Desc Params:
 	serialization.format	1
 PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part_stat
 PREHOOK: Output: default@src_orc_merge_test_part_stat
 PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
 POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
 POSTHOOK: Output: default@src_orc_merge_test_part_stat
 POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
 PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
@@ -323,10 +327,12 @@ POSTHOOK: Input: default@src_orc_merge_test_part_stat
 POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
 PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src_orc_merge_test_part_stat
 PREHOOK: Output: default@src_orc_merge_test_part_stat
 PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
 POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src_orc_merge_test_part_stat
 POSTHOOK: Output: default@src_orc_merge_test_part_stat
 POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011
 PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011')
diff --git a/ql/src/test/results/clientpositive/spark/bucket2.q.out b/ql/src/test/results/clientpositive/spark/bucket2.q.out
index 6e6f990..ca961bd 100644
--- a/ql/src/test/results/clientpositive/spark/bucket2.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket2.q.out
@@ -52,16 +52,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
                       Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       tag: -1
                       value expressions: _col0 (type: string), _col1 (type: string)
                       auto parallelism: false
@@ -118,17 +115,14 @@ STAGE PLANS:
             Needs Tagging: false
             Reduce Operator Tree:
               Extract
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
#### A masked pattern was here ####
                     NumFilesPerFileSink: 2
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -210,14 +204,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: s
-          Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean)
-            Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
               ListSink
 
 PREHOOK: query: select * from bucket2_1 tablesample (bucket 1 out of 2) s
diff --git a/ql/src/test/results/clientpositive/spark/bucket3.q.out b/ql/src/test/results/clientpositive/spark/bucket3.q.out
index ee4c173..6070d70 100644
--- a/ql/src/test/results/clientpositive/spark/bucket3.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket3.q.out
@@ -56,16 +56,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
                       Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       tag: -1
                       value expressions: _col0 (type: string), _col1 (type: string)
                       auto parallelism: false
@@ -122,18 +119,15 @@ STAGE PLANS:
             Needs Tagging: false
             Reduce Operator Tree:
               Extract
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
#### A masked pattern was here ####
                     NumFilesPerFileSink: 2
                     Static Partition Specification: ds=1/
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -234,14 +228,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: s
-          Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean)
-            Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: string), ds (type: string)
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
               ListSink
 
 PREHOOK: query: select * from bucket3_1 tablesample (bucket 1 out of 2) s where ds = '1'
diff --git a/ql/src/test/results/clientpositive/spark/bucket4.q.out b/ql/src/test/results/clientpositive/spark/bucket4.q.out
index 3ea17e2..d1625b9 100644
--- a/ql/src/test/results/clientpositive/spark/bucket4.q.out
+++ b/ql/src/test/results/clientpositive/spark/bucket4.q.out
@@ -48,17 +48,14 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: UDFToInteger(_col0) (type: int)
                       sort order: +
                       Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       tag: -1
                       value expressions: _col0 (type: string), _col1 (type: string)
                       auto parallelism: false
@@ -115,17 +112,14 @@ STAGE PLANS:
             Needs Tagging: false
             Reduce Operator Tree:
               Extract
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
#### A masked pattern was here ####
                     NumFilesPerFileSink: 2
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -209,14 +203,11 @@ STAGE PLANS:
       Processor Tree:
         TableScan
           alias: s
-          Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean)
-            Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: int), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
               ListSink
 
 PREHOOK: query: select * from bucket4_1 tablesample (bucket 1 out of 2) s
diff --git a/ql/src/test/results/clientpositive/spark/column_access_stats.q.out b/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
index 6c1c665..738ae36 100644
--- a/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
+++ b/ql/src/test/results/clientpositive/spark/column_access_stats.q.out
@@ -83,36 +83,19 @@ PREHOOK: query: -- More complicated select queries
 EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1
 PREHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
-                  Select Operator
-                    expressions: key (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
      Processor Tree:
-        ListSink
+        TableScan
+          alias: t1
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            ListSink
 
 PREHOOK: query: SELECT key FROM (SELECT key, val FROM T1) subq1
 PREHOOK: type: QUERY
@@ -130,36 +113,19 @@ Columns:key
 PREHOOK: query: EXPLAIN SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
 PREHOOK: type: QUERY
 STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-0 is a root stage
 
 STAGE PLANS:
-  Stage: Stage-1
-    Spark
-#### A masked pattern was here ####
-      Vertices:
-        Map 1 
-            Map Operator Tree:
-                TableScan
-                  alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
-                  Select Operator
-                    expressions: key (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
-                    File Output Operator
-                      compressed: false
-                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
-                      table:
-                          input format: org.apache.hadoop.mapred.TextInputFormat
-                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
   Stage: Stage-0
     Fetch Operator
       limit: -1
       Processor Tree:
-        ListSink
+        TableScan
+          alias: t1
+          Select Operator
+            expressions: key (type: string)
+            outputColumnNames: _col0
+            ListSink
 
 PREHOOK: query: SELECT k FROM (SELECT key as k, val as v FROM T1) subq1
 PREHOOK: type: QUERY
@@ -418,28 +384,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Map 3 
            Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -449,14 +409,11 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 
                 outputColumnNames: _col0
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -541,28 +498,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: ((val = 3) and key is not null) (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Map 3 
            Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: ((val = 3) and key is not null) (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Join Operator
@@ -572,14 +523,11 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 {KEY.reducesinkkey0}
                 outputColumnNames: _col0, _col5
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), '3' (type: string), _col5 (type: string), '3' (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -631,36 +579,28 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: ((key = 6) and val is not null) (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: val (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Map 3 
            Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: ((key = 5) and val is not null) (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: val (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Join Operator
@@ -670,14 +610,11 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 
                 outputColumnNames: _col0
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -744,50 +681,39 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t3
-                  Statistics: Num rows: 0 Data size: 35 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       value expressions: val (type: string)
         Map 3 
            Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Map 5 
            Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Join Operator
@@ -797,14 +723,11 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 {KEY.reducesinkkey0} {VALUE._col0}
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -818,16 +741,13 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 
                 outputColumnNames: _col0
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 
   Stage: Stage-0
     Fetch Operator
diff --git a/ql/src/test/results/clientpositive/spark/count.q.out b/ql/src/test/results/clientpositive/spark/count.q.out
index 4031360..b391785 100644
--- a/ql/src/test/results/clientpositive/spark/count.q.out
+++ b/ql/src/test/results/clientpositive/spark/count.q.out
@@ -48,22 +48,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: abcd
-                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: a, b, c, d
-                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(DISTINCT b), count(DISTINCT c), sum(d)
                       keys: a (type: int), b (type: int), c (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
                         sort order: +++
                         Map-reduce partition columns: _col0 (type: int)
-                        Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                        value expressions: _col5 (type: bigint)
         Reducer 2 
            Reduce Operator Tree:
@@ -72,14 +68,11 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -122,21 +115,17 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: abcd
-                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: a, b, c, d
-                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1), count(), count(a), count(b), count(c), count(d), count(DISTINCT a), count(DISTINCT b), count(DISTINCT c), count(DISTINCT d), count(DISTINCT a, b), count(DISTINCT b, c), count(DISTINCT c, d), count(DISTINCT a, d), count(DISTINCT a, c), count(DISTINCT b, d), count(DISTINCT a, b, c), count(DISTINCT b, c, d), count(DISTINCT a, c, d), count(DISTINCT a, b, d), count(DISTINCT a, b, c, d)
                       keys: a (type: int), b (type: int), c (type: int), d (type: int)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24
-                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: int)
                         sort order: ++++
-                        Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint)
         Reducer 2 
            Reduce Operator Tree:
@@ -144,14 +133,11 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0), count(VALUE._col1), count(VALUE._col2), count(VALUE._col3), count(VALUE._col4), count(VALUE._col5), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-                Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col15 (type: bigint), _col16 (type: bigint), _col17 (type: bigint), _col18 (type: bigint), _col19 (type: bigint), _col20 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-                  Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -191,16 +177,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: abcd
-                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: a, b, c, d
-                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: a (type: int), b (type: int), c (type: int)
                       sort order: +++
                       Map-reduce partition columns: a (type: int)
-                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                       value expressions: d (type: int)
         Reducer 2 
            Reduce Operator Tree:
@@ -209,14 +192,11 @@ STAGE PLANS:
                 keys: KEY._col0 (type: int)
                 mode: complete
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 2 Data size: 39 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -259,29 +239,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: abcd
-                  Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                     outputColumnNames: a, b, c, d
-                    Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: a (type: int), b (type: int), c (type: int), d (type: int)
                       sort order: ++++
-                      Statistics: Num rows: 4 Data size: 78 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Group By Operator
                aggregations: count(1), count(), count(KEY._col0:0._col0), count(KEY._col0:1._col0), count(KEY._col0:2._col0), count(KEY._col0:3._col0), count(DISTINCT KEY._col0:0._col0), count(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0), count(DISTINCT KEY._col0:3._col0), count(DISTINCT KEY._col0:4._col0, KEY._col0:4._col1), count(DISTINCT KEY._col0:5._col0, KEY._col0:5._col1), count(DISTINCT KEY._col0:6._col0, KEY._col0:6._col1), count(DISTINCT KEY._col0:7._col0, KEY._col0:7._col1), count(DISTINCT KEY._col0:8._col0, KEY._col0:8._col1), count(DISTINCT KEY._col0:9._col0, KEY._col0:9._col1), count(DISTINCT KEY._col0:10._col0, KEY._col0:10._col1, KEY._col0:10._col2), count(DISTINCT KEY._col0:11._col0, KEY._col0:11._col1, KEY._col0:11._col2), count(DISTINCT KEY._col0:12._col0, KEY._col0:12._col1, KEY._col0:12._col2), count(DISTINCT KEY._col0:13._col0, KEY._col0:13._col1, KEY._col0:13._col2), count(DISTINCT KEY._col0:14._col0, KEY._col0:14._col1, KEY._col0:14._col2, KEY._col0:14._col3)
                mode: complete
                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-               Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col15 (type: bigint), _col16 (type: bigint), _col17 (type: bigint), _col18 (type: bigint), _col19 (type: bigint), _col20 (type: bigint)
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20
-                Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/ctas.q.out b/ql/src/test/results/clientpositive/spark/ctas.q.out
index 4c9a369..fd06331 100644
--- a/ql/src/test/results/clientpositive/spark/ctas.q.out
+++ b/ql/src/test/results/clientpositive/spark/ctas.q.out
@@ -41,40 +41,31 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                  compressed: false
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -190,40 +181,31 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -339,40 +321,31 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: (key / 2) (type: double), concat(value, '_con') (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: double), _col1 (type: string)
                       sort order: ++
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: double), _col1 (type: string)
                   sort order: ++
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
                       output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
@@ -553,40 +526,31 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
         Reducer 3 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -745,16 +709,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       tag: -1
                       auto parallelism: true
       Path -> Alias:
@@ -812,14 +773,11 @@ STAGE PLANS:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                   tag: -1
                   auto parallelism: false
         Reducer 3 
@@ -828,16 +786,13 @@ STAGE PLANS:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
               Limit
                 Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                 File Output Operator
                   compressed: false
                   GlobalTableId: 1
#### A masked pattern was here ####
                   NumFilesPerFileSink: 1
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
diff --git a/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out b/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
index 18cfa44..25ba7eb 100644
--- a/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
+++ b/ql/src/test/results/clientpositive/spark/disable_merge_for_bucketing.q.out
@@ -48,16 +48,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
                       Map-reduce partition columns: UDFToInteger(_col0) (type: int)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       tag: -1
                       value expressions: _col0 (type: string), _col1 (type: string)
                       auto parallelism: false
@@ -114,17 +111,14 @@ STAGE PLANS:
             Needs Tagging: false
             Reduce Operator Tree:
               Extract
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
#### A masked pattern was here ####
                     NumFilesPerFileSink: 2
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -211,28 +205,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: s
-                  Statistics: Num rows: 55 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean)
-                    Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: int), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: int)
                         sort order: +
-                        Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 27 Data size: 2853 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out b/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out
index 4938ea5..d0dfed4 100644
--- a/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out
+++ b/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out
@@ -21,25 +21,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -72,25 +67,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
                       Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out b/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out
index 7584e62..9ba7745 100644
--- a/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out
+++ b/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out
@@ -21,25 +21,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
                       Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -72,25 +67,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order: 
                       Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out b/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out
index 7162a83..bf8ddd5 100644
--- a/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out
+++ b/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out
@@ -21,24 +21,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -71,24 +66,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out b/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out
index ba3aece..866d008 100644
--- a/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out
+++ b/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out
@@ -21,24 +21,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -71,24 +66,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string), _col1 (type: string)
                       sort order: ++
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/groupby1.q.out b/ql/src/test/results/clientpositive/spark/groupby1.q.out
index 2b1ada2..d14c033 100644
--- a/ql/src/test/results/clientpositive/spark/groupby1.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby1.q.out
@@ -34,16 +34,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: rand() (type: double)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       value expressions: substr(value, 5) (type: string)
         Reducer 2 
            Reduce Operator Tree:
@@ -52,12 +49,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: partial1
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: double)
         Reducer 3 
            Reduce Operator Tree:
@@ -66,14 +61,11 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1
- Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby2.q.out b/ql/src/test/results/clientpositive/spark/groupby2.q.out index ea6c3db..2a6fe15 100644 --- a/ql/src/test/results/clientpositive/spark/groupby2.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby2.q.out @@ -31,16 +31,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: substr(key, 1, 1) (type: string), substr(value, 5) (type: string) sort order: ++ Map-reduce partition columns: substr(key, 1, 1) (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Group By Operator @@ -48,14 +45,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: complete outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby3.q.out b/ql/src/test/results/clientpositive/spark/groupby3.q.out index 3fb4f7a..295eaa6 100644 --- a/ql/src/test/results/clientpositive/spark/groupby3.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby3.q.out @@ -50,26 +50,21 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) outputColumnNames: value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: substr(value, 5) (type: string) sort order: + Map-reduce partition columns: substr(value, 5) (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Group By Operator aggregations: sum(KEY._col0:0._col0), avg(KEY._col0:0._col0), avg(DISTINCT KEY._col0:0._col0), max(KEY._col0:0._col0), min(KEY._col0:0._col0), std(KEY._col0:0._col0), stddev_samp(KEY._col0:0._col0), variance(KEY._col0:0._col0), var_samp(KEY._col0:0._col0) mode: partial1 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 176 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - 
Statistics: Num rows: 1 Data size: 176 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: double), _col1 (type: struct), _col2 (type: struct), _col3 (type: string), _col4 (type: string), _col5 (type: struct), _col6 (type: struct), _col7 (type: struct), _col8 (type: struct) Reducer 3 Reduce Operator Tree: @@ -77,14 +72,11 @@ STAGE PLANS: aggregations: sum(VALUE._col0), avg(VALUE._col1), avg(VALUE._col2), max(VALUE._col3), min(VALUE._col4), std(VALUE._col5), stddev_samp(VALUE._col6), variance(VALUE._col7), var_samp(VALUE._col8) mode: final outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby3_map.q.out b/ql/src/test/results/clientpositive/spark/groupby3_map.q.out index 47951f7..5d3aab9 100644 --- a/ql/src/test/results/clientpositive/spark/groupby3_map.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby3_map.q.out @@ -49,21 +49,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) outputColumnNames: value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(substr(value, 5)), avg(substr(value, 5)), avg(DISTINCT substr(value, 5)), max(substr(value, 5)), min(substr(value, 5)), std(substr(value, 5)), stddev_samp(substr(value, 5)), variance(substr(value, 5)), var_samp(substr(value, 5)) keys: substr(value, 5) (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double), _col2 (type: struct), _col4 (type: string), _col5 (type: string), _col6 (type: struct), _col7 (type: struct), _col8 (type: struct), _col9 (type: struct) Reducer 2 Reduce Operator Tree: @@ -71,14 +67,11 @@ STAGE PLANS: aggregations: sum(VALUE._col0), avg(VALUE._col1), avg(DISTINCT KEY._col0:0._col0), max(VALUE._col3), min(VALUE._col4), std(VALUE._col5), stddev_samp(VALUE._col6), variance(VALUE._col7), var_samp(VALUE._col8) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 
(type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out index 1f312c2..5395339 100644 --- a/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby3_map_multi_distinct.q.out @@ -53,21 +53,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) outputColumnNames: value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(substr(value, 5)), avg(substr(value, 5)), avg(DISTINCT substr(value, 5)), max(substr(value, 5)), min(substr(value, 5)), std(substr(value, 5)), stddev_samp(substr(value, 5)), variance(substr(value, 5)), var_samp(substr(value, 5)), sum(DISTINCT substr(value, 5)), count(DISTINCT substr(value, 5)) keys: substr(value, 5) (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double), _col2 (type: struct), _col4 (type: string), _col5 (type: string), _col6 (type: struct), _col7 (type: struct), _col8 (type: struct), _col9 (type: struct) Reducer 2 Reduce Operator Tree: @@ -75,14 +71,11 @@ STAGE PLANS: aggregations: sum(VALUE._col0), avg(VALUE._col1), avg(DISTINCT KEY._col0:0._col0), max(VALUE._col3), min(VALUE._col4), std(VALUE._col5), stddev_samp(VALUE._col6), variance(VALUE._col7), var_samp(VALUE._col8), sum(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 - Statistics: Num rows: 1 Data size: 248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), UDFToDouble(_col10) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 - Statistics: Num rows: 1 Data size: 248 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 248 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out index 096687c..68ea6e2 100644 --- 
a/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby3_noskew.q.out @@ -49,29 +49,23 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) outputColumnNames: value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: substr(value, 5) (type: string) sort order: + - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Group By Operator aggregations: sum(KEY._col0:0._col0), avg(KEY._col0:0._col0), avg(DISTINCT KEY._col0:0._col0), max(KEY._col0:0._col0), min(KEY._col0:0._col0), std(KEY._col0:0._col0), stddev_samp(KEY._col0:0._col0), variance(KEY._col0:0._col0), var_samp(KEY._col0:0._col0) mode: complete outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out index 29e14c5..9ff5e98 100644 --- a/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby3_noskew_multi_distinct.q.out @@ -53,29 +53,23 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) outputColumnNames: value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: substr(value, 5) (type: string) sort order: + - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Group By Operator aggregations: sum(KEY._col0:0._col0), avg(KEY._col0:0._col0), avg(DISTINCT KEY._col0:0._col0), max(KEY._col0:0._col0), min(KEY._col0:0._col0), std(KEY._col0:0._col0), stddev_samp(KEY._col0:0._col0), variance(KEY._col0:0._col0), var_samp(KEY._col0:0._col0), sum(DISTINCT KEY._col0:1._col0), count(DISTINCT KEY._col0:2._col0) mode: complete outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 - Statistics: Num rows: 1 Data size: 240 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), UDFToDouble(_col3) (type: double), UDFToDouble(_col4) (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), UDFToDouble(_col10) (type: double) outputColumnNames: _col0, _col1, _col2, 
_col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 - Statistics: Num rows: 1 Data size: 240 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 240 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby4.q.out b/ql/src/test/results/clientpositive/spark/groupby4.q.out index 9c97961..7bf3ea1 100644 --- a/ql/src/test/results/clientpositive/spark/groupby4.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby4.q.out @@ -36,42 +36,34 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: substr(key, 1, 1) (type: string) sort order: + Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: partial1 outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: final outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby7_map.q.out b/ql/src/test/results/clientpositive/spark/groupby7_map.q.out index 59e6884..95d7b59 100644 --- a/ql/src/test/results/clientpositive/spark/groupby7_map.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby7_map.q.out @@ -47,7 +47,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true table: @@ -67,18 +66,15 @@ STAGE PLANS: Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(substr(value, 5)) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double) Reducer 4 Reduce Operator Tree: @@ -87,14 +83,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial 
outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -142,18 +135,15 @@ STAGE PLANS: Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(substr(value, 5)) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double) Reducer 5 Reduce Operator Tree: @@ -162,14 +152,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out index e9632e1..dad908f 100644 --- a/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out @@ -47,34 +47,27 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: substr(value, 5) (type: string) Reducer 2 Reduce Operator Tree: Forward - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(VALUE._col0) keys: KEY._col0 (type: string) mode: complete outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 250 Data size: 2656 Basic 
stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -85,14 +78,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: complete outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out index 1586f4c..b425c67 100644 --- a/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out @@ -47,7 +47,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true table: @@ -68,18 +67,15 @@ STAGE PLANS: Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(substr(value, 5)) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double) Reducer 4 Reduce Operator Tree: @@ -88,12 +84,10 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: partials outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double) Reducer 5 Reduce Operator Tree: @@ -102,14 +96,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: final outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -158,18 +149,15 @@ STAGE PLANS: Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(substr(value, 5)) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 
Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double) Reducer 6 Reduce Operator Tree: @@ -178,12 +166,10 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: partials outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double) Reducer 7 Reduce Operator Tree: @@ -192,14 +178,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: final outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out index 29d7451..dc713b3 100644 --- a/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out @@ -47,7 +47,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true table: @@ -67,12 +66,10 @@ STAGE PLANS: Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: substr(value, 5) (type: string) Reducer 4 Reduce Operator Tree: @@ -81,14 +78,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: complete outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -136,12 +130,10 @@ STAGE PLANS: Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE value expressions: substr(value, 5) (type: string) Reducer 5 Reduce Operator Tree: @@ -150,14 +142,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: complete outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out b/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out index ba8b780..cd8e85e 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out @@ -39,22 +39,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string), val (type: string) outputColumnNames: key, val - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE value expressions: _col3 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -63,14 +59,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -129,22 +122,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string), val (type: string) outputColumnNames: key, val - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(DISTINCT val) keys: key (type: string), '0' (type: string), val (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 60 Basic 
stats: PARTIAL Column stats: NONE Reducer 2 Reduce Operator Tree: Group By Operator @@ -152,14 +141,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -207,22 +193,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string), val (type: string) outputColumnNames: key, val - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE value expressions: _col3 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -231,12 +213,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE value expressions: _col3 (type: bigint) Reducer 3 Reduce Operator Tree: @@ -245,14 +225,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: final outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -311,22 +288,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string), val (type: string) outputColumnNames: key, val - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(DISTINCT val) keys: key (type: string), '0' (type: string), val (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE Reduce 
Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE Reducer 2 Reduce Operator Tree: Group By Operator @@ -334,14 +307,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: complete outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -412,7 +382,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE File Output Operator compressed: false table: @@ -433,18 +402,15 @@ STAGE PLANS: Select Operator expressions: key (type: string), val (type: string) outputColumnNames: key, val - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE value expressions: _col3 (type: bigint) Reducer 4 Reduce Operator Tree: @@ -453,12 +419,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE value expressions: _col3 (type: bigint) Reducer 5 Reduce Operator Tree: @@ -467,14 +431,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: final outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -523,18 +484,15 @@ STAGE PLANS: Select Operator expressions: key (type: string), val (type: string) outputColumnNames: key, val - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: sum(1) keys: key (type: string), val (type: string), 
'0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE value expressions: _col3 (type: bigint) Reducer 6 Reduce Operator Tree: @@ -543,12 +501,10 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE value expressions: _col3 (type: bigint) Reducer 7 Reduce Operator Tree: @@ -557,14 +513,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: final outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out index c6d84df..801ac8a 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out @@ -75,37 +75,29 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: substr(key, 1, 1) (type: string), substr(value, 5) (type: string) sort order: ++ Map-reduce partition columns: substr(key, 1, 1) (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: string) Reducer 2 Reduce Operator Tree: Forward - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (KEY._col0 >= 5) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(DISTINCT KEY._col1:0._col0), sum(KEY._col1:0._col0), sum(DISTINCT KEY._col1:1._col0), count(VALUE._col0) keys: KEY._col0 (type: string) mode: complete outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string), 
UDFToInteger(_col3) (type: int), UDFToInteger(_col4) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -113,20 +105,16 @@ STAGE PLANS: name: default.dest_g2 Filter Operator predicate: (KEY._col0 < 5) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(DISTINCT KEY._col1:0._col0), sum(KEY._col1:0._col0), sum(DISTINCT KEY._col1:1._col0), count(VALUE._col0) keys: KEY._col0 (type: string) mode: complete outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string), UDFToInteger(_col3) (type: int), UDFToInteger(_col4) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -137,14 +125,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: complete outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string), UDFToInteger(_col3) (type: int), UDFToInteger(_col4) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -314,7 +299,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -334,33 +318,26 @@ STAGE PLANS: Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: substr(key, 1, 1) (type: string), substr(value, 5) (type: string) sort order: ++ Map-reduce partition columns: substr(key, 1, 1) (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: string) Reducer 4 Reduce Operator Tree: Forward - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (KEY._col0 >= 5) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(DISTINCT KEY._col1:0._col0), sum(KEY._col1:0._col0), sum(DISTINCT KEY._col1:1._col0), count(VALUE._col0) keys: KEY._col0 (type: string) mode: 
complete
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string), UDFToInteger(_col3) (type: int), UDFToInteger(_col4) (type: int)
                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -368,20 +345,16 @@ STAGE PLANS:
                          name: default.dest_g2
              Filter Operator
                predicate: (KEY._col0 < 5) (type: boolean)
-               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  aggregations: count(DISTINCT KEY._col1:0._col0), sum(KEY._col1:0._col0), sum(DISTINCT KEY._col1:1._col0), count(VALUE._col0)
                  keys: KEY._col0 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string), UDFToInteger(_col3) (type: int), UDFToInteger(_col4) (type: int)
                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -392,14 +365,11 @@ STAGE PLANS:
                  keys: KEY._col0 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string), UDFToInteger(_col3) (type: int), UDFToInteger(_col4) (type: int)
                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -487,50 +457,39 @@ STAGE PLANS:
                  Select Operator
                    expressions: key (type: string), value (type: string)
                    outputColumnNames: key, value
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: substr(key, 1, 1) (type: string), substr(key, 2, 1) (type: string), substr(value, 5) (type: string)
                      sort order: +++
                      Map-reduce partition columns: substr(key, 1, 1) (type: string), substr(key, 2, 1) (type: string)
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      value expressions: value (type: string)
        Reducer 5
            Reduce Operator Tree:
              Forward
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  aggregations: count(DISTINCT KEY._col2:0._col0), sum(KEY._col2:0._col0), count(VALUE._col0)
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col2 (type: bigint), concat(_col0, _col3) (type: string), _col3 (type: double), _col4 (type: bigint)
                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Limit
                      Number of rows: 10
-                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        sort order:
-                       Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                        value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: double), _col4 (type: bigint)
              Filter Operator
                predicate: (KEY._col0 >= 5) (type: boolean)
-               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  aggregations: count(DISTINCT KEY._col2:0._col0), sum(KEY._col2:0._col0), count(VALUE._col0)
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), UDFToInteger(_col2) (type: int), concat(_col0, _col3) (type: string), UDFToInteger(_col3) (type: int), UDFToInteger(_col4) (type: int)
                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -541,17 +500,13 @@ STAGE PLANS:
              Select Operator
                expressions: VALUE._col0 (type: string), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: double), VALUE._col4 (type: bigint)
                outputColumnNames: _col0, _col1, _col2, _col3, _col4
-               Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                Limit
                  Number of rows: 10
-                 Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int), UDFToInteger(_col4) (type: int)
                    outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                   Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
index 47aea2a..829ab23 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
@@ -43,40 +43,31 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: ((substr(key, 1, 1) >= 5) or (substr(key, 1, 1) < 5)) (type: boolean)
-                   Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: key, value
-                     Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: substr(key, 1, 1) (type: string), key (type: string)
                        sort order: ++
                        Map-reduce partition columns: substr(key, 1, 1) (type: string)
-                       Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
                        value expressions: value (type: string)
        Reducer 2
            Reduce Operator Tree:
              Forward
-               Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
                  predicate: (KEY._col0 >= 5) (type: boolean)
-                 Statistics: Num rows: 110 Data size: 1168 Basic stats: COMPLETE Column stats: NONE
                  Group By Operator
                    aggregations: count(DISTINCT KEY._col1:0._col0)
                    keys: KEY._col0 (type: string)
                    mode: complete
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -84,20 +75,16 @@ STAGE PLANS:
                            name: default.dest_g2
                Filter Operator
                  predicate: (KEY._col0 < 5) (type: boolean)
-                 Statistics: Num rows: 110 Data size: 1168 Basic stats: COMPLETE Column stats: NONE
                  Group By Operator
                    aggregations: count(DISTINCT KEY._col1:0._col0), count(VALUE._col0)
                    keys: KEY._col0 (type: string)
                    mode: complete
                    outputColumnNames: _col0, _col1, _col2
-                   Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
                      outputColumnNames: _col0, _col1, _col2
-                     Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
index 0e387a3..43a1b9a 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
@@ -57,40 +57,31 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200)) or ((value) IN ('val_400', 'val_500') and (key) IN (400, 450))) (type: boolean)
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: key, value
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: key (type: string)
                        sort order: +
                        Map-reduce partition columns: key (type: string)
-                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                        value expressions: value (type: string)
        Reducer 2
            Reduce Operator Tree:
              Forward
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
                  predicate: ((VALUE._col0) IN ('val_100', 'val_200', 'val_300') and (KEY._col0) IN (100, 150, 200)) (type: boolean)
-                 Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
                  Group By Operator
                    aggregations: count()
                    keys: KEY._col0 (type: string)
                    mode: complete
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -98,20 +89,16 @@ STAGE PLANS:
                            name: default.e1
                Filter Operator
                  predicate: ((VALUE._col0) IN ('val_400', 'val_500') and (KEY._col0) IN (400, 450)) (type: boolean)
-                 Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
                  Group By Operator
                    aggregations: count()
                    keys: KEY._col0 (type: string)
                    mode: complete
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -239,40 +226,31 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null)) or (((key + key) = 400) or (((key - 100) = 500) and value is not null))) (type: boolean)
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: key, value
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: value (type: string)
                        sort order: +
                        Map-reduce partition columns: value (type: string)
-                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        value expressions: key (type: string)
        Reducer 2
            Reduce Operator Tree:
              Forward
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
                  predicate: ((((VALUE._col0 + VALUE._col0) = 200) or ((VALUE._col0 - 100) = 100)) or ((VALUE._col0 = 300) and KEY._col0 is not null)) (type: boolean)
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Group By Operator
                    aggregations: count()
                    keys: KEY._col0 (type: string)
                    mode: complete
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -280,20 +258,16 @@ STAGE PLANS:
                            name: default.e1
                Filter Operator
                  predicate: (((VALUE._col0 + VALUE._col0) = 400) or (((VALUE._col0 - 100) = 500) and KEY._col0 is not null)) (type: boolean)
-                 Statistics: Num rows: 375 Data size: 3984 Basic stats: COMPLETE Column stats: NONE
                  Group By Operator
                    aggregations: count()
                    keys: KEY._col0 (type: string)
                    mode: complete
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 187 Data size: 1986 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 187 Data size: 1986 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 187 Data size: 1986 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -421,40 +395,31 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200)) or ((value) IN ('val_400', 'val_500') and (key) IN (400, 450))) (type: boolean)
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: key, value
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: key (type: string)
                        sort order: +
                        Map-reduce partition columns: key (type: string)
-                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                        value expressions: value (type: string)
        Reducer 2
            Reduce Operator Tree:
              Forward
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
                  predicate: ((VALUE._col0) IN ('val_100', 'val_200', 'val_300') and (KEY._col0) IN (100, 150, 200)) (type: boolean)
-                 Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
                  Group By Operator
                    aggregations: count()
                    keys: KEY._col0 (type: string)
                    mode: complete
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -462,20 +427,16 @@ STAGE PLANS:
                            name: default.e1
                Filter Operator
                  predicate: ((VALUE._col0) IN ('val_400', 'val_500') and (KEY._col0) IN (400, 450)) (type: boolean)
-                 Statistics: Num rows: 62 Data size: 658 Basic stats: COMPLETE Column stats: NONE
                  Group By Operator
                    aggregations: count()
                    keys: KEY._col0 (type: string)
                    mode: complete
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -603,40 +564,31 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null)) or (((key + key) = 400) or (((key - 100) = 500) and value is not null))) (type: boolean)
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: key, value
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: value (type: string)
                        sort order: +
                        Map-reduce partition columns: value (type: string)
-                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        value expressions: key (type: string)
        Reducer 2
            Reduce Operator Tree:
              Forward
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
                  predicate: ((((VALUE._col0 + VALUE._col0) = 200) or ((VALUE._col0 - 100) = 100)) or ((VALUE._col0 = 300) and KEY._col0 is not null)) (type: boolean)
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Group By Operator
                    aggregations: count()
                    keys: KEY._col0 (type: string)
                    mode: complete
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -644,20 +596,16 @@ STAGE PLANS:
                            name: default.e1
                Filter Operator
                  predicate: (((VALUE._col0 + VALUE._col0) = 400) or (((VALUE._col0 - 100) = 500) and KEY._col0 is not null)) (type: boolean)
-                 Statistics: Num rows: 375 Data size: 3984 Basic stats: COMPLETE Column stats: NONE
                  Group By Operator
                    aggregations: count()
                    keys: KEY._col0 (type: string)
                    mode: complete
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 187 Data size: 1986 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 187 Data size: 1986 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 187 Data size: 1986 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/groupby_position.q.out b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
index 924e02b..b04e55c 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_position.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_position.q.out
@@ -47,7 +47,6 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
                    table:
@@ -66,22 +65,18 @@ STAGE PLANS:
                TableScan
                  Filter Operator
                    predicate: (key < 20) (type: boolean)
-                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: key, value
-                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
                        aggregations: count(DISTINCT substr(value, 5))
                        keys: key (type: string), substr(value, 5) (type: string)
                        mode: hash
                        outputColumnNames: _col0, _col1, _col2
-                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: string)
                          sort order: ++
                          Map-reduce partition columns: _col0 (type: string)
-                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
        Reducer 4
            Reduce Operator Tree:
              Group By Operator
@@ -89,14 +84,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -143,22 +135,18 @@ STAGE PLANS:
                TableScan
                  Filter Operator
                    predicate: (key < 20) (type: boolean)
-                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: key, value
-                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
                        aggregations: count(DISTINCT substr(value, 5))
                        keys: key (type: string), value (type: string), substr(value, 5) (type: string)
                        mode: hash
                        outputColumnNames: _col0, _col1, _col2, _col3
-                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                          sort order: +++
                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
        Reducer 5
            Reduce Operator Tree:
              Group By Operator
@@ -166,14 +154,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col2 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -272,7 +257,6 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
                    table:
@@ -291,22 +275,18 @@ STAGE PLANS:
                TableScan
                  Filter Operator
                    predicate: (key < 20) (type: boolean)
-                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: key, value
-                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
                        aggregations: count(DISTINCT substr(value, 5))
                        keys: key (type: string), substr(value, 5) (type: string)
                        mode: hash
                        outputColumnNames: _col0, _col1, _col2
-                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: string)
                          sort order: ++
                          Map-reduce partition columns: _col0 (type: string)
-                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
        Reducer 4
            Reduce Operator Tree:
              Group By Operator
@@ -314,14 +294,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: bigint)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -368,22 +345,18 @@ STAGE PLANS:
                TableScan
                  Filter Operator
                    predicate: (key < 20) (type: boolean)
-                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: value (type: string), key (type: string)
                      outputColumnNames: value, key
-                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
                        aggregations: count(DISTINCT substr(value, 5))
                        keys: value (type: string), key (type: string), substr(value, 5) (type: string)
                        mode: hash
                        outputColumnNames: _col0, _col1, _col2, _col3
-                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                          sort order: +++
                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
        Reducer 5
            Reduce Operator Tree:
              Group By Operator
@@ -391,14 +364,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col1) (type: int), _col0 (type: string), _col2 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -498,25 +468,20 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: b
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (key <= 20) (type: boolean)
-                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: key
-                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
                        aggregations: count(1)
                        keys: key (type: string)
                        mode: hash
                        outputColumnNames: _col0, _col1
-                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: string)
                          sort order: +
                          Map-reduce partition columns: _col0 (type: string)
-                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                          value expressions: _col1 (type: bigint)
        Reducer 2
            Reduce Operator Tree:
@@ -525,24 +490,19 @@ STAGE PLANS:
                keys: KEY._col0 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: bigint)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col1 (type: bigint), _col0 (type: string)
                    sort order: -+
-                   Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
        Reducer 3
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: bigint)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -630,47 +590,37 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src1
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (((key > 10) and (key < 20)) and key is not null) (type: boolean)
-                   Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: key, value
-                     Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
                        aggregations: count(DISTINCT substr(value, 5))
                        keys: key (type: string), value (type: string), substr(value, 5) (type: string)
                        mode: hash
                        outputColumnNames: _col0, _col1, _col2, _col3
-                       Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                          sort order: +++
                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                         Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
        Map 5
            Map Operator Tree:
                TableScan
                  alias: src2
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (((key > 15) and (key < 25)) and key is not null) (type: boolean)
-                   Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: key, value
-                     Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
                        keys: key (type: string), value (type: string)
                        mode: hash
                        outputColumnNames: _col0, _col1
-                       Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: string)
                          sort order: ++
                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                         Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
        Reducer 2
            Reduce Operator Tree:
              Group By Operator
@@ -678,16 +628,13 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col0 (type: string)
                    sort order: +
                    Map-reduce partition columns: _col0 (type: string)
-                   Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
                    value expressions: _col1 (type: string)
        Reducer 3
            Reduce Operator Tree:
@@ -698,24 +645,19 @@ STAGE PLANS:
                  0 {KEY.reducesinkkey0} {VALUE._col0}
                  1 {KEY.reducesinkkey0} {VALUE._col0}
                outputColumnNames: _col0, _col1, _col3, _col4
-               Statistics: Num rows: 15 Data size: 162 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
                  outputColumnNames: _col0, _col1, _col2, _col3
-                 Statistics: Num rows: 15 Data size: 162 Basic stats: COMPLETE Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
                    sort order: --++
-                   Statistics: Num rows: 15 Data size: 162 Basic stats: COMPLETE Column stats: NONE
        Reducer 4
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 15 Data size: 162 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 15 Data size: 162 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -726,16 +668,13 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col0 (type: string)
                    sort order: +
                    Map-reduce partition columns: _col0 (type: string)
-                   Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
                    value expressions: _col1 (type: string)
  Stage: Stage-0
diff --git a/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out b/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
index 991c9f3..00c443e 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out
@@ -108,17 +108,14 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string), value (type: string)
                    outputColumnNames: key, value
-                   Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: substr(key, 1, 1) (type: string), substr(value, 5) (type: string)
                      sort order: ++
                      Map-reduce partition columns: substr(key, 1, 1) (type: string)
-                     Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                      tag: -1
                      auto parallelism: true
            Path -> Alias:
@@ -227,17 +224,14 @@ STAGE PLANS:
                keys: KEY._col0 (type: string)
                mode: complete
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), UDFToInteger(_col1) (type: int), concat(_col0, _col2) (type: string)
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
                    GlobalTableId: 1
#### A masked pattern was here ####
                    NumFilesPerFileSink: 1
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
diff --git a/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
index 2df6f75..4bde6ea 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out
@@ -39,22 +39,18 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  Select Operator
                    expressions: key (type: string), val (type: string)
                    outputColumnNames: key, val
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: key (type: string), val (type: string), '0' (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                        sort order: +++
                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-                       Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                        value expressions: _col3 (type: bigint)
        Reducer 2
            Reduce Operator Tree:
@@ -63,14 +59,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -123,22 +116,18 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  Select Operator
                    expressions: key (type: string), val (type: string)
                    outputColumnNames: key, val
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(DISTINCT val)
                      keys: key (type: string), '0' (type: string), val (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                        sort order: +++
                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                       Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
        Reducer 2
            Reduce Operator Tree:
              Group By Operator
@@ -146,14 +135,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col2 (type: bigint)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -201,22 +187,18 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  Select Operator
                    expressions: key (type: string), val (type: string)
                    outputColumnNames: key, val
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: key (type: string), val (type: string), '0' (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                        sort order: +++
                        Map-reduce partition columns: rand() (type: double)
-                       Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                        value expressions: _col3 (type: bigint)
        Reducer 2
            Reduce Operator Tree:
@@ -225,12 +207,10 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                mode: partials
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                  sort order: +++
                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                 Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                  value expressions: _col3 (type: bigint)
        Reducer 3
            Reduce Operator Tree:
@@ -239,14 +219,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                mode: final
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -299,22 +276,18 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  Select Operator
                    expressions: key (type: string), val (type: string)
                    outputColumnNames: key, val
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(DISTINCT val)
                      keys: key (type: string), '0' (type: string), val (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                        sort order: +++
                        Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
        Reducer 2
            Reduce Operator Tree:
              Group By Operator
@@ -322,14 +295,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: complete
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col2 (type: bigint)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -400,7 +370,6 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  File Output Operator
                    compressed: false
                    table:
@@ -421,18 +390,15 @@ STAGE PLANS:
                  Select Operator
                    expressions: key (type: string), val (type: string)
                    outputColumnNames: key, val
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: key (type: string), val (type: string), '0' (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                        sort order: +++
                        Map-reduce partition columns: rand() (type: double)
-                       Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                        value expressions: _col3 (type: bigint)
        Reducer 4
            Reduce Operator Tree:
@@ -441,12 +407,10 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                mode: partials
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                  sort order: +++
                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                 Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                  value expressions: _col3 (type: bigint)
        Reducer 5
            Reduce Operator Tree:
@@ -455,14 +419,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                mode: final
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -511,18 +472,15 @@ STAGE PLANS:
                  Select Operator
                    expressions: key (type: string), val (type: string)
                    outputColumnNames: key, val
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: sum(1)
                      keys: key (type: string), val (type: string), '0' (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                        sort order: +++
                        Map-reduce partition columns: rand() (type: double)
-                       Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                        value expressions: _col3 (type: bigint)
        Reducer 6
            Reduce Operator Tree:
@@ -531,12 +489,10 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                mode: partials
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
                  sort order: +++
                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                 Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE
                  value expressions: _col3 (type: bigint)
        Reducer 7
            Reduce Operator Tree:
@@ -545,14 +501,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string)
                mode: final
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string), UDFToInteger(_col3) (type: int)
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
index c256b90..ab2fe84 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out
@@ -95,28 +95,23 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: key
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: key (type: string)
                      mode: final
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Select Operator
                        expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                        outputColumnNames: _col0, _col1
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        File Output Operator
                          compressed: false
                          GlobalTableId: 1
#### A masked pattern was here ####
                          NumFilesPerFileSink: 1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                          table:
                              input format: org.apache.hadoop.mapred.TextInputFormat
@@ -311,23 +306,19 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string), val (type: string)
                    outputColumnNames: key, val
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: key (type: string), val (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1, _col2
-                     Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string)
                        sort order: ++
                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                       Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                        tag: -1
                        value expressions: _col2 (type: bigint)
                        auto parallelism: true
@@ -392,17 +383,14 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
                    GlobalTableId: 1
#### A masked pattern was here ####
                    NumFilesPerFileSink: 1
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
@@ -546,28 +534,23 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: _col0 (type: string)
                      mode: final
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Select Operator
                        expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                        outputColumnNames: _col0, _col1
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        File Output Operator
                          compressed: false
                          GlobalTableId: 1
#### A masked pattern was here ####
                          NumFilesPerFileSink: 1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                          table:
                              input format: org.apache.hadoop.mapred.TextInputFormat
@@ -774,28 +757,23 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: _col0 (type: string)
                      mode: final
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Select Operator
                        expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                        outputColumnNames: _col0, _col1
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        File Output Operator
                          compressed: false
                          GlobalTableId: 1
#### A masked pattern was here ####
                          NumFilesPerFileSink: 1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                          table:
                              input format: org.apache.hadoop.mapred.TextInputFormat
@@ -998,28 +976,23 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: key
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: 1 (type: int), key (type: string)
                      mode: final
                      outputColumnNames: _col0, _col1, _col2
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Select Operator
                        expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
                        outputColumnNames: _col0, _col1, _col2
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        File Output Operator
                          compressed: false
                          GlobalTableId: 1
#### A masked pattern was here ####
                          NumFilesPerFileSink: 1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                          table:
                              input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1218,23 +1191,19 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string), val (type: string)
                    outputColumnNames: key, val
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: key (type: string), 1 (type: int), val (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                        sort order: +++
                        Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
-                       Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                        tag: -1
                        value expressions: _col3 (type: bigint)
                        auto parallelism: true
@@ -1299,17 +1268,14 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
                  outputColumnNames: _col0, _col1, _col2, _col3
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
                    GlobalTableId: 1
#### A masked pattern was here ####
                    NumFilesPerFileSink: 1
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1450,23 +1416,19 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: key
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: key (type: string), (key + 1) (type: double)
                      mode: hash
                      outputColumnNames: _col0, _col1, _col2
-                     Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: double)
                        sort order: ++
                        Map-reduce partition columns: _col0 (type: string), _col1 (type: double)
-                       Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                        tag: -1
                        value expressions: _col2 (type: bigint)
                        auto parallelism: true
@@ -1531,17 +1493,14 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: double)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
                    GlobalTableId: 1
#### A masked pattern was here ####
                    NumFilesPerFileSink: 1
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1718,33 +1677,27 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: key
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: key (type: string)
                      mode: final
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Select Operator
                        expressions: _col0 (type: string), _col1 (type: bigint)
                        outputColumnNames: _col0, _col1
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        Group By Operator
                          aggregations: sum(_col1)
                          keys: (_col0 + _col0) (type: double)
                          mode: hash
                          outputColumnNames: _col0, _col1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                          Reduce Output Operator
                            key expressions: _col0 (type: double)
                            sort order: +
                            Map-reduce partition columns: _col0 (type: double)
-                           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                            tag: -1
                            value expressions: _col1 (type: bigint)
                            auto parallelism: true
@@ -1809,17 +1762,14 @@ STAGE PLANS:
                keys: KEY._col0 (type: double)
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
                    GlobalTableId: 1
#### A masked pattern was here ####
                    NumFilesPerFileSink: 1
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
@@ -2480,23 +2430,19 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: key
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: (key + key) (type: double)
                      mode: hash
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: double)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: double)
-                       Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                        tag: -1
                        value expressions: _col1 (type: bigint)
                        auto parallelism: true
@@ -2791,31 +2737,25 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: false
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: key
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Group By Operator
                        aggregations: count(1)
                        keys: key (type: string)
                        mode: final
                        outputColumnNames: _col0, _col1
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        Select Operator
                          expressions: _col0 (type: string), _col1 (type: bigint)
                          outputColumnNames: _col0, _col1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                          Reduce Output Operator
                            key expressions: _col0 (type: string)
                            sort order: +
                            Map-reduce partition columns: _col0 (type: string)
-                           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                            tag: 0
                            value expressions: _col1 (type: bigint)
                            auto parallelism: true
@@ -2876,31 +2816,25 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: false
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: key
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Group By Operator
                        aggregations: count(1)
                        keys: key (type: string)
                        mode: final
                        outputColumnNames: _col0, _col1
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        Select Operator
                          expressions: _col0 (type: string), _col1 (type: bigint)
                          outputColumnNames: _col0, _col1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                          Reduce Output Operator
                            key expressions: _col0 (type: string)
                            sort order: +
                            Map-reduce partition columns: _col0 (type: string)
-                           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                            tag: 1
                            value expressions: _col1 (type: bigint)
                            auto parallelism: true
@@ -2967,17 +2901,14 @@ STAGE PLANS:
                  0 {KEY.reducesinkkey0} {VALUE._col0}
                  1 {VALUE._col0}
                outputColumnNames: _col0, _col1, _col3
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger((_col1 + _col3)) (type: int)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
                    GlobalTableId: 1
#### A masked pattern was here ####
                    NumFilesPerFileSink: 1
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
@@ -3176,31 +3107,25 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: false
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: key
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Group By Operator
                        aggregations: count(1)
                        keys: key (type: string)
                        mode: final
                        outputColumnNames: _col0, _col1
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        Select Operator
                          expressions: _col0 (type: string), _col1 (type: bigint)
                          outputColumnNames: _col0, _col1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                          Reduce Output Operator
                            key expressions: _col0 (type: string)
                            sort order: +
                            Map-reduce partition columns: _col0 (type: string)
-                           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                            tag: 0
                            value expressions: _col1 (type: bigint)
                            auto parallelism: true
@@ -3261,27 +3186,22 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t1
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: false
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), val (type: string)
                      outputColumnNames: key, val
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Group By Operator
                        aggregations: count(1)
                        keys: key (type: string), val (type: string)
                        mode: hash
                        outputColumnNames: _col0, _col1, _col2
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: string)
                          sort order: ++
                          Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                          tag: -1
                          value expressions: _col2 (type: bigint)
                          auto parallelism: true
@@ -3348,17 +3268,14 @@ STAGE PLANS:
                  0 {KEY.reducesinkkey0} {VALUE._col0}
                  1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1}
                outputColumnNames: _col0, _col1, _col2, _col3, _col4
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string), _col4 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
                    GlobalTableId: 0
#### A masked pattern was here ####
                    NumFilesPerFileSink: 1
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
@@ -3382,16 +3299,13 @@ STAGE PLANS:
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col0 (type: string)
                    sort order: +
                    Map-reduce partition columns: _col0 (type: string)
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    tag: 1
                    value expressions: _col1 (type: string), _col2 (type: bigint)
                    auto parallelism: true
@@ -3477,24 +3391,20 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t2
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: key
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      bucketGroup: true
                      keys: key (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                        tag: -1
                        value expressions: _col1 (type: bigint)
                        auto parallelism: true
@@ -3559,17 +3469,14 @@ STAGE PLANS:
                keys: KEY._col0 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
                    GlobalTableId: 1
#### A masked pattern was here ####
                    NumFilesPerFileSink: 1
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
@@ -3716,28 +3623,23 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t2
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string), val (type: string)
                    outputColumnNames: key, val
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: key (type: string), 1 (type: int), val (type: string)
                      mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Select Operator
                        expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
                        outputColumnNames: _col0, _col1, _col2, _col3
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        File Output Operator
                          compressed: false
                          GlobalTableId: 1
#### A masked pattern was here ####
                          NumFilesPerFileSink: 1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                          table:
                              input format: org.apache.hadoop.mapred.TextInputFormat
@@ -3951,28 +3853,23 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t2
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string), val (type: string)
                    outputColumnNames: key, val
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: key (type: string), 1 (type: int), val (type: string), 2 (type: int)
                      mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Select Operator
                        expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int)
                        outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        File Output Operator
                          compressed: false
                          GlobalTableId: 1
#### A masked pattern was here ####
                          NumFilesPerFileSink: 1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                          table:
                              input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4190,28 +4087,23 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t2
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string), 1 (type: int), val (type: string)
                    outputColumnNames: _col0, _col1, _col2
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                      mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Select Operator
                        expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
                        outputColumnNames: _col0, _col1, _col2, _col3
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        File Output Operator
                          compressed: false
                          GlobalTableId: 1
#### A masked pattern was here ####
                          NumFilesPerFileSink: 1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                          table:
                              input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4468,28 +4360,23 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t2
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string), 2 (type: int), val (type: string)
                    outputColumnNames: _col0, _col3, _col2
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      keys: _col0 (type: string), _col3 (type: int), _col2 (type: string)
                      mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Select Operator
                        expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
                        outputColumnNames: _col0, _col1, _col2, _col3
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        File Output Operator
                          compressed: false
                          GlobalTableId: 1
#### A masked pattern was here ####
                          NumFilesPerFileSink: 1
-                         Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                          table:
                              input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4687,7 +4574,6 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: t2
-                 Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                  File Output Operator
                    compressed: true
                    table:
@@ -4707,19 +4593,16 @@ STAGE PLANS:
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: key
-                   Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
                      bucketGroup: true
                      keys: key (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                        value expressions: _col1 (type: bigint)
        Reducer 4
            Reduce Operator Tree:
@@ -4728,14 +4611,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 0 Data
size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -4781,20 +4661,16 @@ STAGE PLANS: Select Operator expressions: key (type: string), val (type: string) outputColumnNames: key, val - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string), val (type: string) mode: final outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -4880,14 +4756,11 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t2 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Filter Operator predicate: (key = 8) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: val (type: string) outputColumnNames: _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: true table: @@ -4907,19 +4780,16 @@ STAGE PLANS: Select Operator expressions: '8' (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(1) bucketGroup: true keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reducer 4 Reduce Operator Tree: @@ -4928,14 +4798,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: true - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -4981,20 +4848,16 @@ STAGE PLANS: Select Operator expressions: '8' (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(1) keys: _col0 (type: string), _col1 (type: string) mode: final outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int) 
                         outputColumnNames: _col0, _col1, _col2
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: true
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
index 3d2cebe..5c1cbc4 100644
--- a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
+++ b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out
@@ -95,28 +95,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: false
                           GlobalTableId: 1
 #### A masked pattern was here ####
                           NumFilesPerFileSink: 1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -312,23 +307,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
                     outputColumnNames: key, val
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string), val (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: string)
                         sort order: ++
                         Map-reduce partition columns: rand() (type: double)
-                        Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                         tag: -1
                         value expressions: _col2 (type: bigint)
                         auto parallelism: true
@@ -393,12 +384,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: partials
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   tag: -1
                   value expressions: _col2 (type: bigint)
                   auto parallelism: true
@@ -410,17 +399,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -564,28 +550,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: _col0 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: false
                           GlobalTableId: 1
 #### A masked pattern was here ####
                           NumFilesPerFileSink: 1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -792,28 +773,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: _col0 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: false
                           GlobalTableId: 1
 #### A masked pattern was here ####
                           NumFilesPerFileSink: 1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1016,28 +992,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: 1 (type: int), key (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: false
                           GlobalTableId: 1
 #### A masked pattern was here ####
                           NumFilesPerFileSink: 1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1237,23 +1208,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
                     outputColumnNames: key, val
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string), 1 (type: int), val (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                         sort order: +++
                         Map-reduce partition columns: rand() (type: double)
-                        Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                         tag: -1
                         value expressions: _col3 (type: bigint)
                         auto parallelism: true
@@ -1318,12 +1285,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
                 mode: partials
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                   sort order: +++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   tag: -1
                   value expressions: _col3 (type: bigint)
                   auto parallelism: true
@@ -1335,17 +1300,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1487,23 +1449,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string), (key + 1) (type: double)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: double)
                         sort order: ++
                         Map-reduce partition columns: rand() (type: double)
-                        Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                         tag: -1
                         value expressions: _col2 (type: bigint)
                         auto parallelism: true
@@ -1568,12 +1526,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: double)
                 mode: partials
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: double)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: double)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   tag: -1
                   value expressions: _col2 (type: bigint)
                   auto parallelism: true
@@ -1585,17 +1541,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: double)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1773,33 +1726,27 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: string), _col1 (type: bigint)
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         Group By Operator
                           aggregations: sum(_col1)
                           keys: (_col0 + _col0) (type: double)
                           mode: hash
                           outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                           Reduce Output Operator
                             key expressions: _col0 (type: double)
                             sort order: +
                             Map-reduce partition columns: rand() (type: double)
-                            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                             tag: -1
                             value expressions: _col1 (type: bigint)
                             auto parallelism: true
@@ -1864,12 +1811,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: double)
                 mode: partials
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: double)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: double)
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   tag: -1
                   value expressions: _col1 (type: bigint)
                   auto parallelism: true
@@ -1881,17 +1826,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: double)
                 mode: final
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -2553,23 +2495,19 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: (key + key) (type: double)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: double)
                         sort order: +
                         Map-reduce partition columns: rand() (type: double)
-                        Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                         tag: -1
                         value expressions: _col1 (type: bigint)
                         auto parallelism: true
@@ -2634,12 +2572,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: double)
                 mode: partials
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: double)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: double)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   tag: -1
                   value expressions: _col1 (type: bigint)
                   auto parallelism: true
@@ -2881,31 +2817,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: key
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Group By Operator
                         aggregations: count(1)
                         keys: key (type: string)
                         mode: final
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         Select Operator
                           expressions: _col0 (type: string), _col1 (type: bigint)
                           outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                           Reduce Output Operator
                             key expressions: _col0 (type: string)
                             sort order: +
                             Map-reduce partition columns: _col0 (type: string)
-                            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                             tag: 0
                             value expressions: _col1 (type: bigint)
                             auto parallelism: true
@@ -2966,31 +2896,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: key
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Group By Operator
                         aggregations: count(1)
                         keys: key (type: string)
                         mode: final
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         Select Operator
                           expressions: _col0 (type: string), _col1 (type: bigint)
                           outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                           Reduce Output Operator
                             key expressions: _col0 (type: string)
                             sort order: +
                             Map-reduce partition columns: _col0 (type: string)
-                            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                             tag: 1
                             value expressions: _col1 (type: bigint)
                             auto parallelism: true
@@ -3057,17 +2981,14 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0} {VALUE._col0}
                   1 {VALUE._col0}
                 outputColumnNames: _col0, _col1, _col3
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), UDFToInteger((_col1 + _col3)) (type: int)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -3267,31 +3188,25 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: key (type: string)
                       outputColumnNames: key
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Group By Operator
                         aggregations: count(1)
                         keys: key (type: string)
                         mode: final
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         Select Operator
                           expressions: _col0 (type: string), _col1 (type: bigint)
                           outputColumnNames: _col0, _col1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                           Reduce Output Operator
                             key expressions: _col0 (type: string)
                             sort order: +
                             Map-reduce partition columns: _col0 (type: string)
-                            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                             tag: 0
                             value expressions: _col1 (type: bigint)
                             auto parallelism: true
@@ -3352,27 +3267,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t1
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Filter Operator
                     isSamplingPred: false
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), val (type: string)
                       outputColumnNames: key, val
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Group By Operator
                         aggregations: count(1)
                         keys: key (type: string), val (type: string)
                         mode: hash
                         outputColumnNames: _col0, _col1, _col2
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: string), _col1 (type: string)
                           sort order: ++
                           Map-reduce partition columns: rand() (type: double)
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                           tag: -1
                           value expressions: _col2 (type: bigint)
                           auto parallelism: true
@@ -3439,17 +3349,14 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0} {VALUE._col0}
                   1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1}
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string), _col4 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -3473,12 +3380,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: partials
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   tag: -1
                   value expressions: _col2 (type: bigint)
                   auto parallelism: true
@@ -3490,16 +3395,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string), KEY._col1 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
                   outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                     Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     tag: 1
                     value expressions: _col1 (type: string), _col2 (type: bigint)
                     auto parallelism: true
@@ -3586,24 +3488,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       bucketGroup: true
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: rand() (type: double)
-                        Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                         tag: -1
                         value expressions: _col1 (type: bigint)
                         auto parallelism: true
@@ -3668,12 +3566,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: partials
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   tag: -1
                   value expressions: _col1 (type: bigint)
                   auto parallelism: true
@@ -3685,17 +3581,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 1
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -3842,28 +3735,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
                     outputColumnNames: key, val
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string), 1 (type: int), val (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: false
                           GlobalTableId: 1
 #### A masked pattern was here ####
                           NumFilesPerFileSink: 1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4077,28 +3965,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), val (type: string)
                     outputColumnNames: key, val
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string), 1 (type: int), val (type: string), 2 (type: int)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: false
                           GlobalTableId: 1
 #### A masked pattern was here ####
                           NumFilesPerFileSink: 1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4316,28 +4199,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), 1 (type: int), val (type: string)
                     outputColumnNames: _col0, _col1, _col2
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: _col0 (type: string), _col1 (type: int), _col2 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: false
                           GlobalTableId: 1
 #### A masked pattern was here ####
                           NumFilesPerFileSink: 1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4594,28 +4472,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: key (type: string), 2 (type: int), val (type: string)
                     outputColumnNames: _col0, _col3, _col2
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: _col0 (type: string), _col3 (type: int), _col2 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
                         outputColumnNames: _col0, _col1, _col2, _col3
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: false
                           GlobalTableId: 1
 #### A masked pattern was here ####
                           NumFilesPerFileSink: 1
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
@@ -4813,7 +4686,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   File Output Operator
                     compressed: true
                     table:
@@ -4834,19 +4706,16 @@ STAGE PLANS:
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       bucketGroup: true
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: rand() (type: double)
-                        Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 4 
             Reduce Operator Tree:
@@ -4855,12 +4724,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: partials
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 5 
             Reduce Operator Tree:
@@ -4869,14 +4736,11 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: true
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -4922,20 +4786,16 @@ STAGE PLANS:
                   Select Operator
                     expressions: key (type: string), val (type: string)
                     outputColumnNames: key, val
-                    Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string), val (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: true
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -5021,14 +4881,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: t2
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
                   Filter Operator
                     predicate: (key = 8) (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Select Operator
                       expressions: val (type: string)
                       outputColumnNames: _col1
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       File Output Operator
                         compressed: true
                         table:
@@ -5049,19 +4906,16 @@ STAGE PLANS:
                   Select Operator
                     expressions: '8' (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       bucketGroup: true
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: rand() (type: double)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 4 
             Reduce Operator Tree:
@@ -5070,12 +4924,10 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: partials
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
         Reducer 5 
             Reduce Operator Tree:
@@ -5084,14 +4936,11 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: final
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: true
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -5137,20 +4986,16 @@ STAGE PLANS:
                   Select Operator
                     expressions: '8' (type: string), _col1 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: _col0 (type: string), _col1 (type: string)
                       mode: final
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Select Operator
                         expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int)
                         outputColumnNames: _col0, _col1, _col2
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         File Output Operator
                           compressed: true
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                           table:
                               input format: org.apache.hadoop.mapred.TextInputFormat
                               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/having.q.out b/ql/src/test/results/clientpositive/spark/having.q.out
index ebb9fe8..093746a 100644
--- a/ql/src/test/results/clientpositive/spark/having.q.out
+++ b/ql/src/test/results/clientpositive/spark/having.q.out
@@ -19,22 +19,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(value)
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -43,17 +39,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (_col1 > 3) (type: boolean)
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col1 (type: bigint)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -102,25 +94,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key <> 302) (type: boolean)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: key, value
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: max(value)
                         keys: key (type: string)
                         mode: hash
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: string)
                           sort order: +
                           Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -129,14 +116,11 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -483,22 +467,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(value)
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -507,17 +487,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (_col1 > 'val_255') (type: boolean)
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -755,25 +731,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (key > 300) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: key, value
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: max(value)
                         keys: key (type: string)
                         mode: hash
                         outputColumnNames: _col0, _col1
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                         Reduce Output Operator
                           key expressions: _col0 (type: string)
                           sort order: +
                           Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -782,17 +753,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (_col1 > 'val_255') (type: boolean)
-                  Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -956,22 +923,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(value)
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -980,17 +943,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (_col1 > 'val_255') (type: boolean)
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1228,22 +1187,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: key, value
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(value)
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2 
             Reduce Operator Tree:
@@ -1252,17 +1207,13 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   predicate: (_col1 >= 4) (type: boolean)
-                  Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: bigint)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/innerjoin.q.out b/ql/src/test/results/clientpositive/spark/innerjoin.q.out
index 0db53de..63a73d1 100644
--- a/ql/src/test/results/clientpositive/spark/innerjoin.q.out
+++ b/ql/src/test/results/clientpositive/spark/innerjoin.q.out
@@ -35,29 +35,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src2
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       value expressions: value (type: string)
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: src1
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: key is not null (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: key (type: string)
                       sort order: +
                       Map-reduce partition columns: key (type: string)
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
         Reducer 2 
             Reduce Operator Tree:
               Join Operator
@@ -67,14 +61,11 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0}
                   1 {VALUE._col0}
                 outputColumnNames: _col0, _col6
-                Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: UDFToInteger(_col0) (type: int), _col6 (type: string)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1201,27 +1192,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: key (type: string)
                     sort order: +
                     Map-reduce partition columns: key (type: string)
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     value expressions: value (type: string)
         Map 3 
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col1 (type: string)
         Reducer 2 
             Reduce Operator Tree:
@@ -1232,14 +1218,11 @@ STAGE PLANS:
                   0 {KEY.reducesinkkey0} {VALUE._col0}
                   1 {KEY.reducesinkkey0} {VALUE._col0}
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
                   outputColumnNames: _col0, _col1, _col2, _col3
-                  Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/input12.q.out b/ql/src/test/results/clientpositive/spark/input12.q.out
index a4b7a3c..4b0cf44 100644
--- a/ql/src/test/results/clientpositive/spark/input12.q.out
+++ b/ql/src/test/results/clientpositive/spark/input12.q.out
@@ -60,7 +60,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     table:
@@ -77,14 +76,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key < 100) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: UDFToInteger(key) (type: int), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -145,14 +141,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((key >= 100) and (key < 200)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: UDFToInteger(key) (type: int), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -168,14 +161,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key >= 200) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: UDFToInteger(key) (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/input13.q.out b/ql/src/test/results/clientpositive/spark/input13.q.out
index 5c799dc..260a65a 100644
--- a/ql/src/test/results/clientpositive/spark/input13.q.out
+++ b/ql/src/test/results/clientpositive/spark/input13.q.out
@@ -60,7 +60,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     table:
@@ -77,14 +76,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key < 100) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: UDFToInteger(key) (type: int), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -145,14 +141,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((key >= 100) and (key < 200)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: UDFToInteger(key) (type: int), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -168,14 +161,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((key >= 200) and (key < 300)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: UDFToInteger(key) (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output
format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -191,14 +181,11 @@ STAGE PLANS: TableScan Filter Operator predicate: (key >= 300) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string) outputColumnNames: _col0 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/input14.q.out b/ql/src/test/results/clientpositive/spark/input14.q.out index fcd915b..42035b8 100644 --- a/ql/src/test/results/clientpositive/spark/input14.q.out +++ b/ql/src/test/results/clientpositive/spark/input14.q.out @@ -41,36 +41,29 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Transform Operator command: cat output info: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col0 < 100) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/input17.q.out b/ql/src/test/results/clientpositive/spark/input17.q.out index dcdeb98..346edc4 100644 --- a/ql/src/test/results/clientpositive/spark/input17.q.out +++ b/ql/src/test/results/clientpositive/spark/input17.q.out @@ -41,33 +41,27 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src_thrift - Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (aint + lint[0]) (type: int), lintstring[0] (type: struct<myint:int,mystring:string,underscore_int:int>) outputColumnNames: _col0, _col1 - Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE Transform Operator command: cat output info: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string)
sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: UDFToInteger(VALUE._col0) (type: int), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 11 Data size: 3070 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/input18.q.out b/ql/src/test/results/clientpositive/spark/input18.q.out index 58c7ec9..188db3b 100644 --- a/ql/src/test/results/clientpositive/spark/input18.q.out +++ b/ql/src/test/results/clientpositive/spark/input18.q.out @@ -41,36 +41,29 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), 3 (type: int), 7 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Transform Operator command: cat output info: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col0 < 100) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: UDFToInteger(VALUE._col0) (type: int), regexp_replace(VALUE._col1, ' ', '+') (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/input1_limit.q.out b/ql/src/test/results/clientpositive/spark/input1_limit.q.out index a119f5e..90bc8ea 100644 --- a/ql/src/test/results/clientpositive/spark/input1_limit.q.out +++ b/ql/src/test/results/clientpositive/spark/input1_limit.q.out @@ -47,7 +47,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -66,34 +65,26 @@ STAGE PLANS: TableScan Filter Operator predicate: (key < 100) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num 
rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 4 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -140,34 +131,26 @@ STAGE PLANS: TableScan Filter Operator predicate: (key < 100) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 5 - Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 5 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 5 - Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/input_part2.q.out b/ql/src/test/results/clientpositive/spark/input_part2.q.out index 514f54a..f2f3a2d 100644 --- a/ql/src/test/results/clientpositive/spark/input_part2.q.out +++ b/ql/src/test/results/clientpositive/spark/input_part2.q.out @@ -146,7 +146,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE GatherStats: false File Output Operator compressed: false @@ -275,17 +274,14 @@ STAGE PLANS: Filter Operator isSamplingPred: false predicate: ((key < 100) and (ds = '2008-04-08')) (type: boolean) - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(key) (type: int), value (type: string), hr (type: string), '2008-04-08' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 1 #### A masked 
pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -398,17 +394,14 @@ STAGE PLANS: Filter Operator isSamplingPred: false predicate: ((key < 100) and (ds = '2008-04-09')) (type: boolean) - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(key) (type: int), value (type: string), hr (type: string), '2008-04-09' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 2 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git a/ql/src/test/results/clientpositive/spark/insert1.q.out b/ql/src/test/results/clientpositive/spark/insert1.q.out index 1b88026..65032cb 100644 --- a/ql/src/test/results/clientpositive/spark/insert1.q.out +++ b/ql/src/test/results/clientpositive/spark/insert1.q.out @@ -48,17 +48,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Filter Operator predicate: (key = -1) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: -1 (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -100,17 +96,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Filter Operator predicate: (key = -1) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: -1 (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -168,17 +160,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Filter Operator predicate: (key = -1) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: -1 (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -220,17 +208,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Filter 
Operator predicate: (key = -1) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: -1 (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -282,7 +266,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: insert2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -299,14 +282,11 @@ STAGE PLANS: TableScan Filter Operator predicate: (key < 10) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -351,14 +331,11 @@ STAGE PLANS: TableScan Filter Operator predicate: ((key > 10) and (key < 20)) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/insert_into1.q.out b/ql/src/test/results/clientpositive/spark/insert_into1.q.out index b00f3f9..6c3cbe3 100644 --- a/ql/src/test/results/clientpositive/spark/insert_into1.q.out +++ b/ql/src/test/results/clientpositive/spark/insert_into1.q.out @@ -31,34 +31,26 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE 
Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -105,6 +97,66 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@insert_into1 #### A masked pattern was here #### 10226524244 +PREHOOK: query: explain +select count(*) from insert_into1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from insert_into1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: insert_into1 + Select Operator + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +100 PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100 @@ -126,34 +178,26 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -200,11 +244,62 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@insert_into1 #### A masked pattern was here #### 20453048488 -PREHOOK: query: SELECT COUNT(*) FROM 
insert_into1 +PREHOOK: query: explain +SELECT COUNT(*) FROM insert_into1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT COUNT(*) FROM insert_into1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: insert_into1 + Select Operator + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from insert_into1 PREHOOK: type: QUERY PREHOOK: Input: default@insert_into1 #### A masked pattern was here #### -POSTHOOK: query: SELECT COUNT(*) FROM insert_into1 +POSTHOOK: query: select count(*) from insert_into1 POSTHOOK: type: QUERY POSTHOOK: Input: default@insert_into1 #### A masked pattern was here #### @@ -230,34 +325,26 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -304,6 +391,66 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@insert_into1 #### A masked pattern was here #### -826625916 +PREHOOK: query: explain +SELECT COUNT(*) FROM insert_into1 +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT COUNT(*) FROM insert_into1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: insert_into1 + Select Operator + Group By 
Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from insert_into1 +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from insert_into1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into1 +#### A masked pattern was here #### +10 PREHOOK: query: DROP TABLE insert_into1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@insert_into1 diff --git a/ql/src/test/results/clientpositive/spark/insert_into2.q.out b/ql/src/test/results/clientpositive/spark/insert_into2.q.out index c1b2be3..919f052 100644 --- a/ql/src/test/results/clientpositive/spark/insert_into2.q.out +++ b/ql/src/test/results/clientpositive/spark/insert_into2.q.out @@ -35,34 +35,26 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -98,6 +90,68 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@insert_into2@ds=1 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +select count (*) from insert_into2 where ds = '1' +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count (*) from insert_into2 where ds = '1' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP) 
+#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: insert_into2 + Select Operator + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count (*) from insert_into2 where ds = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into2 +PREHOOK: Input: default@insert_into2@ds=1 +#### A masked pattern was here #### +POSTHOOK: query: select count (*) from insert_into2 where ds = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into2 +POSTHOOK: Input: default@insert_into2@ds=1 +#### A masked pattern was here #### +100 PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100 PREHOOK: type: QUERY PREHOOK: Input: default@src @@ -109,6 +163,57 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@insert_into2@ds=1 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +SELECT COUNT(*) FROM insert_into2 WHERE ds='1' +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT COUNT(*) FROM insert_into2 WHERE ds='1' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: insert_into2 + Select Operator + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1' PREHOOK: type: QUERY PREHOOK: Input: default@insert_into2 @@ -158,34 +263,26 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator 
sort order: - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -240,6 +337,68 @@ POSTHOOK: Input: default@insert_into2@ds=1 POSTHOOK: Input: default@insert_into2@ds=2 #### A masked pattern was here #### -36239931656 +PREHOOK: query: explain +SELECT COUNT(*) FROM insert_into2 WHERE ds='2' +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT COUNT(*) FROM insert_into2 WHERE ds='2' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: insert_into2 + Select Operator + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2' +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into2 +PREHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into2 +POSTHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +100 PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2') SELECT * FROM src LIMIT 50 PREHOOK: type: QUERY @@ -263,34 +422,26 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 50 - Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: 
VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 50 - Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -345,6 +496,68 @@ POSTHOOK: Input: default@insert_into2@ds=1 POSTHOOK: Input: default@insert_into2@ds=2 #### A masked pattern was here #### -27100860056 +PREHOOK: query: explain +SELECT COUNT(*) FROM insert_into2 WHERE ds='2' +PREHOOK: type: QUERY +POSTHOOK: query: explain +SELECT COUNT(*) FROM insert_into2 WHERE ds='2' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: insert_into2 + Select Operator + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2' +PREHOOK: type: QUERY +PREHOOK: Input: default@insert_into2 +PREHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@insert_into2 +POSTHOOK: Input: default@insert_into2@ds=2 +#### A masked pattern was here #### +50 PREHOOK: query: DROP TABLE insert_into2 PREHOOK: type: DROPTABLE PREHOOK: Input: default@insert_into2 diff --git a/ql/src/test/results/clientpositive/spark/insert_into3.q.out b/ql/src/test/results/clientpositive/spark/insert_into3.q.out index aa84112..7964802 100644 --- a/ql/src/test/results/clientpositive/spark/insert_into3.q.out +++ b/ql/src/test/results/clientpositive/spark/insert_into3.q.out @@ -47,7 +47,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -67,27 +66,21 @@ STAGE PLANS: Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 4 Reduce Operator Tree: Select Operator 
expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 50 - Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 50 Data size: 500 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -135,27 +128,21 @@ STAGE PLANS: Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 5 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -231,7 +218,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -251,30 +237,23 @@ STAGE PLANS: Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 4 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -322,30 +301,23 @@ STAGE PLANS: Select Operator 
expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 5 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/join0.q.out b/ql/src/test/results/clientpositive/spark/join0.q.out index e0df0ad..d6b219c 100644 --- a/ql/src/test/results/clientpositive/spark/join0.q.out +++ b/ql/src/test/results/clientpositive/spark/join0.q.out @@ -30,33 +30,25 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key < 10) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Map 4 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key < 10) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: @@ -67,24 +59,19 @@ STAGE PLANS: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) sort order: ++++ - Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: 
NONE Reducer 3 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/join1.q.out b/ql/src/test/results/clientpositive/spark/join1.q.out index 517a983..f267a63 100644 --- a/ql/src/test/results/clientpositive/spark/join1.q.out +++ b/ql/src/test/results/clientpositive/spark/join1.q.out @@ -31,29 +31,23 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: string) Map 3 Map Operator Tree: TableScan alias: src1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Join Operator @@ -63,14 +57,11 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} outputColumnNames: _col0, _col6 - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/join10.q.out b/ql/src/test/results/clientpositive/spark/join10.q.out index 2184033..5b2a3d9 100644 --- a/ql/src/test/results/clientpositive/spark/join10.q.out +++ b/ql/src/test/results/clientpositive/spark/join10.q.out @@ -27,36 +27,28 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Map 3 Map 
Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string) Reducer 2 Reduce Operator Tree: @@ -67,14 +59,11 @@ STAGE PLANS: 0 1 {KEY.reducesinkkey0} {VALUE._col0} outputColumnNames: _col2, _col3 - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col3 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/join11.q.out b/ql/src/test/results/clientpositive/spark/join11.q.out index a335787..2f64641 100644 --- a/ql/src/test/results/clientpositive/spark/join11.q.out +++ b/ql/src/test/results/clientpositive/spark/join11.q.out @@ -29,37 +29,29 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((key < 100) and key is not null) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string) Map 3 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((key < 100) and key is not null) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Join Operator @@ -69,14 +61,11 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} outputColumnNames: _col0, _col3 - Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col3 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 91 Data size: 969 
Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/join12.q.out b/ql/src/test/results/clientpositive/spark/join12.q.out index 40a7641..97942a4 100644 --- a/ql/src/test/results/clientpositive/spark/join12.q.out +++ b/ql/src/test/results/clientpositive/spark/join12.q.out @@ -35,54 +35,42 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((key < 100) and key is not null) and (key < 80)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string) Map 3 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((key < 80) and key is not null) and (key < 100)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((key < 100) and key is not null) and (key < 80)) (type: boolean) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Join Operator @@ -94,14 +82,11 @@ STAGE PLANS: 1 {VALUE._col0} 2 outputColumnNames: _col0, _col3 - Statistics: Num rows: 59 Data size: 629 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col3 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 59 Data size: 629 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 59 Data size: 629 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/join13.q.out b/ql/src/test/results/clientpositive/spark/join13.q.out index f247fad..07663c5 100644 --- a/ql/src/test/results/clientpositive/spark/join13.q.out +++ b/ql/src/test/results/clientpositive/spark/join13.q.out @@ -36,54 +36,42 @@ STAGE PLANS: Map Operator Tree: 
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key < 100) and key is not null) (type: boolean)
-            Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Map 4
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key < 100) and key is not null) (type: boolean)
-            Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
               outputColumnNames: _col0
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
   Map 5
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key < 200) and UDFToDouble(key) is not null) (type: boolean)
-            Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
               outputColumnNames: _col0
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: UDFToDouble(_col0) (type: double)
                 sort order: +
                 Map-reduce partition columns: UDFToDouble(_col0) (type: double)
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
   Reducer 2
     Reduce Operator Tree:
       Join Operator
@@ -93,15 +81,12 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0}
           1 {KEY.reducesinkkey0} {VALUE._col0}
         outputColumnNames: _col0, _col2, _col3
-        Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
         Filter Operator
           predicate: (_col0 + _col2) is not null (type: boolean)
-          Statistics: Num rows: 46 Data size: 489 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: (_col0 + _col2) (type: double)
             sort order: +
             Map-reduce partition columns: (_col0 + _col2) (type: double)
-            Statistics: Num rows: 46 Data size: 489 Basic stats: COMPLETE Column stats: NONE
             value expressions: _col0 (type: string), _col3 (type: string)
   Reducer 3
     Reduce Operator Tree:
@@ -112,14 +97,11 @@ STAGE PLANS:
           0 {VALUE._col0} {VALUE._col3}
           1 
         outputColumnNames: _col0, _col3
-        Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col3 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join14.q.out b/ql/src/test/results/clientpositive/spark/join14.q.out
index fe40867..23b54bb 100644
--- a/ql/src/test/results/clientpositive/spark/join14.q.out
+++ b/ql/src/test/results/clientpositive/spark/join14.q.out
@@ -35,29 +35,23 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: srcpart
-          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 100) and key is not null) (type: boolean)
-            Statistics: Num rows: 167 Data size: 1774 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 167 Data size: 1774 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Map 3
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 100) and key is not null) (type: boolean)
-            Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
   Reducer 2
     Reduce Operator Tree:
       Join Operator
@@ -67,14 +61,11 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0}
           1 {VALUE._col0}
         outputColumnNames: _col0, _col6
-        Statistics: Num rows: 183 Data size: 1951 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: UDFToInteger(_col0) (type: int), _col6 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 183 Data size: 1951 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 183 Data size: 1951 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join15.q.out b/ql/src/test/results/clientpositive/spark/join15.q.out
index 3128191..9085ca0 100644
--- a/ql/src/test/results/clientpositive/spark/join15.q.out
+++ b/ql/src/test/results/clientpositive/spark/join15.q.out
@@ -20,29 +20,23 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Map 4
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -53,24 +47,19 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0} {VALUE._col0}
           1 {KEY.reducesinkkey0} {VALUE._col0}
         outputColumnNames: _col0, _col1, _col5, _col6
-        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
             sort order: ++++
-            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
   Reducer 3
     Reduce Operator Tree:
       Select Operator
         expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
        outputColumnNames: _col0, _col1, _col2, _col3
-        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
         File Output Operator
           compressed: false
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join16.q.out b/ql/src/test/results/clientpositive/spark/join16.q.out
index c78704d..a247e1c 100644
--- a/ql/src/test/results/clientpositive/spark/join16.q.out
+++ b/ql/src/test/results/clientpositive/spark/join16.q.out
@@ -17,32 +17,25 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: a
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (((((key > 10) and (key > 20)) and key is not null) and value is not null) and (value < 200)) (type: boolean)
-            Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                Statistics: Num rows: 4 Data size: 42 Basic stats: COMPLETE Column stats: NONE
   Map 3
     Map Operator Tree:
         TableScan
           alias: tab
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((((key > 20) and key is not null) and value is not null) and (value < 200)) (type: boolean)
-            Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string), value (type: string)
               sort order: ++
               Map-reduce partition columns: key (type: string), value (type: string)
-              Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
   Reducer 2
     Reduce Operator Tree:
       Join Operator
@@ -52,14 +45,11 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0}
           1 {KEY.reducesinkkey1}
         outputColumnNames: _col0, _col3
-        Statistics: Num rows: 15 Data size: 162 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col3 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 15 Data size: 162 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 15 Data size: 162 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join17.q.out b/ql/src/test/results/clientpositive/spark/join17.q.out
index f491b7e..3a76709 100644
--- a/ql/src/test/results/clientpositive/spark/join17.q.out
+++ b/ql/src/test/results/clientpositive/spark/join17.q.out
@@ -69,17 +69,14 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           GatherStats: false
           Filter Operator
             isSamplingPred: false
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               tag: 1
               value expressions: value (type: string)
               auto parallelism: true
@@ -136,17 +133,14 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           GatherStats: false
           Filter Operator
             isSamplingPred: false
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               tag: 0
               value expressions: value (type: string)
               auto parallelism: true
@@ -209,17 +203,14 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0} {VALUE._col0}
           1 {KEY.reducesinkkey0} {VALUE._col0}
         outputColumnNames: _col0, _col1, _col5, _col6
-        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col5) (type: int), _col6 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 1
#### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join18.q.out b/ql/src/test/results/clientpositive/spark/join18.q.out
index 1e22ff5..6f60751 100644
--- a/ql/src/test/results/clientpositive/spark/join18.q.out
+++ b/ql/src/test/results/clientpositive/spark/join18.q.out
@@ -41,42 +41,34 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string)
             outputColumnNames: key, value
-            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count(DISTINCT value)
               keys: key (type: string), value (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
   Map 4
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), value (type: string)
             outputColumnNames: key, value
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count(value)
               keys: key (type: string)
               mode: hash
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: bigint)
   Reducer 2
     Reduce Operator Tree:
@@ -85,16 +77,13 @@ STAGE PLANS:
         keys: KEY._col0 (type: string)
         mode: mergepartial
         outputColumnNames: _col0, _col1
-        Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col1 (type: bigint)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: _col0 (type: string)
             sort order: +
             Map-reduce partition columns: _col0 (type: string)
-            Statistics: Num rows: 12 Data size: 91 Basic stats: COMPLETE Column stats: NONE
             value expressions: _col1 (type: bigint)
   Reducer 3
     Reduce Operator Tree:
@@ -105,14 +94,11 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0} {VALUE._col0}
           1 {KEY.reducesinkkey0} {VALUE._col0}
         outputColumnNames: _col0, _col1, _col2, _col3
-        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -124,16 +110,13 @@ STAGE PLANS:
         keys: KEY._col0 (type: string)
         mode: mergepartial
         outputColumnNames: _col0, _col1
-        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col1 (type: bigint)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: _col0 (type: string)
             sort order: +
             Map-reduce partition columns: _col0 (type: string)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             value expressions: _col1 (type: bigint)
   Stage: Stage-0
diff --git a/ql/src/test/results/clientpositive/spark/join19.q.out b/ql/src/test/results/clientpositive/spark/join19.q.out
index ae00709..f71a4be 100644
--- a/ql/src/test/results/clientpositive/spark/join19.q.out
+++ b/ql/src/test/results/clientpositive/spark/join19.q.out
@@ -135,108 +135,84 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: t6
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           Filter Operator
             predicate: ((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__LABEL_REL') and subject is not null) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
               expressions: subject (type: string), object (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col1 (type: string)
   Map 3
     Map Operator Tree:
         TableScan
           alias: t2
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           Filter Operator
             predicate: ((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__LABEL_REL') and subject is not null) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
               expressions: subject (type: string), object (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col1 (type: string)
   Map 6
     Map Operator Tree:
         TableScan
           alias: t3
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           Filter Operator
             predicate: (((predicate = 'http://www.ontosearch.com/2007/12/ontosofa-ns#_from') and object is not null) and subject is not null) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
               expressions: subject (type: string), object (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col1 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col1 (type: string)
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col0 (type: string)
   Map 7
     Map Operator Tree:
         TableScan
           alias: t5
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           Filter Operator
             predicate: (((predicate = 'http://www.ontosearch.com/2007/12/ontosofa-ns#_to') and subject is not null) and object is not null) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
               expressions: subject (type: string), object (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: _col1 (type: string)
   Map 8
     Map Operator Tree:
         TableScan
           alias: t1
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           Filter Operator
             predicate: (((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL') and (object = 'http://ontos/OntosMiner/Common.English/ontology#Citation')) and subject is not null) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
               expressions: subject (type: string)
               outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
   Map 9
     Map Operator Tree:
         TableScan
           alias: t4
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           Filter Operator
             predicate: (((predicate = 'http://sofa.semanticweb.org/sofa/v1.0/system#__INSTANCEOF_REL') and (object = 'http://ontos/OntosMiner/Common.English/ontology#Author')) and subject is not null) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
               expressions: subject (type: string)
               outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
   Reducer 2
     Reduce Operator Tree:
       Join Operator
@@ -246,14 +222,11 @@ STAGE PLANS:
           0 {VALUE._col0} {VALUE._col2} {VALUE._col3} {KEY.reducesinkkey0}
           1 {VALUE._col0}
         outputColumnNames: _col0, _col2, _col3, _col7, _col9
-        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col2 (type: string), _col3 (type: string), _col7 (type: string), _col9 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -269,12 +242,10 @@ STAGE PLANS:
           1 {VALUE._col0}
           2 {VALUE._col0}
         outputColumnNames: _col0, _col2, _col3
-        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reduce Output Operator
           key expressions: _col3 (type: string)
           sort order: +
           Map-reduce partition columns: _col3 (type: string)
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           value expressions: _col0 (type: string), _col2 (type: string)
   Reducer 5
     Reduce Operator Tree:
@@ -287,12 +258,10 @@ STAGE PLANS:
           1 
           2 {VALUE._col0}
         outputColumnNames: _col0, _col2, _col3, _col7
-        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reduce Output Operator
           key expressions: _col7 (type: string)
           sort order: +
           Map-reduce partition columns: _col7 (type: string)
-          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           value expressions: _col0 (type: string), _col2 (type: string), _col3 (type: string)
   Stage: Stage-0
diff --git a/ql/src/test/results/clientpositive/spark/join2.q.out b/ql/src/test/results/clientpositive/spark/join2.q.out
index dcb4338..e5c82f3 100644
--- a/ql/src/test/results/clientpositive/spark/join2.q.out
+++ b/ql/src/test/results/clientpositive/spark/join2.q.out
@@ -32,42 +32,33 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Map 4
     Map Operator Tree:
         TableScan
           alias: src3
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: UDFToDouble(key) is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: UDFToDouble(key) (type: double)
               sort order: +
               Map-reduce partition columns: UDFToDouble(key) (type: double)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Map 5
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Reducer 2
     Reduce Operator Tree:
       Join Operator
@@ -77,15 +68,12 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0}
           1 {KEY.reducesinkkey0}
         outputColumnNames: _col0, _col5
-        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
         Filter Operator
           predicate: (_col0 + _col5) is not null (type: boolean)
-          Statistics: Num rows: 138 Data size: 1465 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: (_col0 + _col5) (type: double)
             sort order: +
             Map-reduce partition columns: (_col0 + _col5) (type: double)
-            Statistics: Num rows: 138 Data size: 1465 Basic stats: COMPLETE Column stats: NONE
             value expressions: _col0 (type: string)
   Reducer 3
     Reduce Operator Tree:
@@ -96,14 +84,11 @@ STAGE PLANS:
           0 {VALUE._col0}
           1 {VALUE._col1}
         outputColumnNames: _col0, _col11
-        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: UDFToInteger(_col0) (type: int), _col11 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join20.q.out b/ql/src/test/results/clientpositive/spark/join20.q.out
index 2e5db05..60bfb4f 100644
--- a/ql/src/test/results/clientpositive/spark/join20.q.out
+++ b/ql/src/test/results/clientpositive/spark/join20.q.out
@@ -22,40 +22,32 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (key < 10) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Map 4
     Map Operator Tree:
         TableScan
           alias: src3
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: key (type: string)
             sort order: +
             Map-reduce partition columns: key (type: string)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             value expressions: value (type: string)
   Map 5
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (key < 10) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -72,24 +64,19 @@ STAGE PLANS:
           1 
           2 {(KEY.reducesinkkey0 < 20)}
         outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
-        Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
             sort order: ++++++
-            Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
   Reducer 3
     Reduce Operator Tree:
       Select Operator
         expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: string)
         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-        Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
         File Output Operator
           compressed: false
-          Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -683,40 +670,32 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key < 10) and (key < 15)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Map 4
     Map Operator Tree:
         TableScan
           alias: src3
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: key (type: string)
             sort order: +
             Map-reduce partition columns: key (type: string)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             value expressions: value (type: string)
   Map 5
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key < 15) and (key < 10)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -733,24 +712,19 @@ STAGE PLANS:
           1 
           2 {(KEY.reducesinkkey0 < 20)}
         outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
-        Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
             sort order: ++++++
-            Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
   Reducer 3
     Reduce Operator Tree:
       Select Operator
         expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: string)
         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-        Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
         File Output Operator
           compressed: false
-          Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join21.q.out b/ql/src/test/results/clientpositive/spark/join21.q.out
index 47e2ee0..b927ab9 100644
--- a/ql/src/test/results/clientpositive/spark/join21.q.out
+++ b/ql/src/test/results/clientpositive/spark/join21.q.out
@@ -20,37 +20,30 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (key > 10) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Map 4
     Map Operator Tree:
         TableScan
           alias: src3
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: key (type: string)
             sort order: +
             Map-reduce partition columns: key (type: string)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             value expressions: value (type: string)
   Map 5
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: key (type: string)
             sort order: +
             Map-reduce partition columns: key (type: string)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             value expressions: value (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -67,24 +60,19 @@ STAGE PLANS:
           1 
           2 {(KEY.reducesinkkey0 < 10)}
         outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11
-        Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: string)
             sort order: ++++++
-            Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
   Reducer 3
     Reduce Operator Tree:
       Select Operator
         expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string), KEY.reducesinkkey4 (type: string), KEY.reducesinkkey5 (type: string)
         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-        Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
         File Output Operator
           compressed: false
-          Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join22.q.out b/ql/src/test/results/clientpositive/spark/join22.q.out
index 7422d2f..e958889 100644
--- a/ql/src/test/results/clientpositive/spark/join22.q.out
+++ b/ql/src/test/results/clientpositive/spark/join22.q.out
@@ -20,42 +20,33 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Map 4
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Map 5
     Map Operator Tree:
         TableScan
           alias: src4
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Reducer 2
     Reduce Operator Tree:
       Join Operator
@@ -65,16 +56,13 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0} {VALUE._col0}
           1 
         outputColumnNames: _col0, _col1
-        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col1 (type: string)
           outputColumnNames: _col2, _col3
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: _col2 (type: string)
             sort order: +
             Map-reduce partition columns: _col2 (type: string)
-            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
             value expressions: _col3 (type: string)
   Reducer 3
     Reduce Operator Tree:
@@ -85,14 +73,11 @@ STAGE PLANS:
           0 
           1 {VALUE._col2}
         outputColumnNames: _col8
-        Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col8 (type: string)
           outputColumnNames: _col0
-          Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join23.q.out b/ql/src/test/results/clientpositive/spark/join23.q.out
index 08ee10a..7b4e8d1 100644
--- a/ql/src/test/results/clientpositive/spark/join23.q.out
+++ b/ql/src/test/results/clientpositive/spark/join23.q.out
@@ -20,25 +20,19 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (key < 10) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               sort order: 
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               value expressions: key (type: string), value (type: string)
   Map 4
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (key < 10) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               sort order: 
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               value expressions: key (type: string), value (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -49,24 +43,19 @@ STAGE PLANS:
           0 {VALUE._col0} {VALUE._col1}
           1 {VALUE._col0} {VALUE._col1}
         outputColumnNames: _col0, _col1, _col5, _col6
-        Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
           Reduce Output Operator
             key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
             sort order: ++++
-            Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
   Reducer 3
     Reduce Operator Tree:
       Select Operator
         expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
         outputColumnNames: _col0, _col1, _col2, _col3
-        Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
         File Output Operator
           compressed: false
-          Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
           table:
               input format: org.apache.hadoop.mapred.TextInputFormat
               output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join25.q.out b/ql/src/test/results/clientpositive/spark/join25.q.out
index b4459d3..e27d89a 100644
--- a/ql/src/test/results/clientpositive/spark/join25.q.out
+++ b/ql/src/test/results/clientpositive/spark/join25.q.out
@@ -37,29 +37,23 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: y
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Map 3
     Map Operator Tree:
         TableScan
           alias: x
-          Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -70,14 +64,11 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0} {VALUE._col0}
           1 {VALUE._col0}
         outputColumnNames: _col0, _col1, _col6
-        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col6 (type: string)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join26.q.out b/ql/src/test/results/clientpositive/spark/join26.q.out
index 87f4c39..c5fc5ed 100644
--- a/ql/src/test/results/clientpositive/spark/join26.q.out
+++ b/ql/src/test/results/clientpositive/spark/join26.q.out
@@ -118,17 +118,14 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: z
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           GatherStats: false
           Filter Operator
             isSamplingPred: false
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               tag: 2
               value expressions: value (type: string)
               auto parallelism: true
@@ -187,17 +184,14 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: y
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           GatherStats: false
           Filter Operator
             isSamplingPred: false
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               tag: 1
               value expressions: value (type: string)
               auto parallelism: true
@@ -254,17 +248,14 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: x
-          Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
           GatherStats: false
           Filter Operator
             isSamplingPred: false
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
               tag: 0
               auto parallelism: true
       Path -> Alias:
@@ -328,17 +319,14 @@ STAGE PLANS:
           1 {VALUE._col0}
           2 {VALUE._col0}
         outputColumnNames: _col0, _col6, _col11
-        Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: _col0 (type: string), _col11 (type: string), _col6 (type: string)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             GlobalTableId: 1
#### A masked pattern was here ####
             NumFilesPerFileSink: 1
-            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join27.q.out b/ql/src/test/results/clientpositive/spark/join27.q.out
index 7e06b85..2d69ac0 100644
--- a/ql/src/test/results/clientpositive/spark/join27.q.out
+++ b/ql/src/test/results/clientpositive/spark/join27.q.out
@@ -37,28 +37,22 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: y
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: value is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: value (type: string)
               sort order: +
               Map-reduce partition columns: value (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Map 3
     Map Operator Tree:
         TableScan
           alias: x
-          Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: value is not null (type: boolean)
-            Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: value (type: string)
               sort order: +
               Map-reduce partition columns: value (type: string)
-              Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE
               value expressions: key (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -69,14 +63,11 @@ STAGE PLANS:
           0 {VALUE._col0} {KEY.reducesinkkey0}
           1 {KEY.reducesinkkey0}
         outputColumnNames: _col0, _col1, _col6
-        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col6 (type: string)
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join3.q.out b/ql/src/test/results/clientpositive/spark/join3.q.out
index b41ebdd..3910df9 100644
--- a/ql/src/test/results/clientpositive/spark/join3.q.out
+++ b/ql/src/test/results/clientpositive/spark/join3.q.out
@@ -31,42 +31,33 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Map 3
     Map Operator Tree:
         TableScan
           alias: src3
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               value expressions: value (type: string)
   Map 4
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: key is not null (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
   Reducer 2
     Reduce Operator Tree:
       Join Operator
@@ -78,14 +69,11 @@ STAGE PLANS:
           1 
           2 {VALUE._col0}
         outputColumnNames: _col0, _col11
-        Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: UDFToInteger(_col0) (type: int), _col11 (type: string)
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join4.q.out b/ql/src/test/results/clientpositive/spark/join4.q.out
index 6512265..0b3c488 100644
--- a/ql/src/test/results/clientpositive/spark/join4.q.out
+++ b/ql/src/test/results/clientpositive/spark/join4.q.out
@@ -53,37 +53,29 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 10) and (key < 20)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Map 3
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 15) and (key < 25)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -94,14 +86,11 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0} {VALUE._col0}
           1 {KEY.reducesinkkey0} {VALUE._col0}
         outputColumnNames: _col0, _col1, _col2, _col3
-        Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int), _col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join5.q.out b/ql/src/test/results/clientpositive/spark/join5.q.out
index fbf2ed4..55785ec 100644
--- a/ql/src/test/results/clientpositive/spark/join5.q.out
+++ b/ql/src/test/results/clientpositive/spark/join5.q.out
@@ -53,37 +53,29 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 10) and (key < 20)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Map 3
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 15) and (key < 25)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -94,14 +86,11 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0} {VALUE._col0}
           1 {KEY.reducesinkkey0} {VALUE._col0}
         outputColumnNames: _col0, _col1, _col2, _col3
-        Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int), _col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join6.q.out b/ql/src/test/results/clientpositive/spark/join6.q.out
index b95e0a4..af634fd 100644
--- a/ql/src/test/results/clientpositive/spark/join6.q.out
+++ b/ql/src/test/results/clientpositive/spark/join6.q.out
@@ -53,37 +53,29 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 10) and (key < 20)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Map 3
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 15) and (key < 25)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -94,14 +86,11 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0} {VALUE._col0}
           1 {KEY.reducesinkkey0} {VALUE._col0}
         outputColumnNames: _col0, _col1, _col2, _col3
-        Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int), _col3 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 60 Data size: 642 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join7.q.out b/ql/src/test/results/clientpositive/spark/join7.q.out
index 7bd3bdc..fe12fa9 100644
--- a/ql/src/test/results/clientpositive/spark/join7.q.out
+++ b/ql/src/test/results/clientpositive/spark/join7.q.out
@@ -63,55 +63,43 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 10) and (key < 20)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Map 3
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 15) and (key < 25)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Map 4
     Map Operator Tree:
         TableScan
           alias: src3
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 20) and (key < 25)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -124,14 +112,11 @@ STAGE PLANS:
           1 {KEY.reducesinkkey0} {VALUE._col0}
           2 {KEY.reducesinkkey0} {VALUE._col0}
         outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-        Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
         Select Operator
           expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int), _col3 (type: string), UDFToInteger(_col4) (type: int), _col5 (type: string)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5
-          Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 121 Data size: 1284 Basic stats: COMPLETE Column stats: NONE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/join8.q.out b/ql/src/test/results/clientpositive/spark/join8.q.out
index 8d97aca..01f608d 100644
--- a/ql/src/test/results/clientpositive/spark/join8.q.out
+++ b/ql/src/test/results/clientpositive/spark/join8.q.out
@@ -53,37 +53,29 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src1
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (((key > 10) and (key < 20)) and key is not null) (type: boolean)
-            Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Map 3
     Map Operator Tree:
         TableScan
           alias: src2
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (((key > 15) and (key < 25)) and key is not null) (type: boolean)
-            Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string)
                 sort order: +
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                 value expressions: _col1 (type: string)
   Reducer 2
     Reduce Operator Tree:
@@ -94,17 +86,13 @@ STAGE PLANS:
           0 {KEY.reducesinkkey0} {VALUE._col0}
           1 {KEY.reducesinkkey0} {VALUE._col0}
         outputColumnNames: _col0, _col1, _col2, _col3
-        Statistics: Num rows: 30 Data size: 326 Basic stats: COMPLETE Column stats: NONE
Filter Operator predicate: _col2 is null (type: boolean) - Statistics: Num rows: 15 Data size: 163 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col2) (type: int), _col3 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(null) (type: int), _col3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 15 Data size: 163 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 15 Data size: 163 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/join9.q.out b/ql/src/test/results/clientpositive/spark/join9.q.out index 05d68b7..6cb81e8 100644 --- a/ql/src/test/results/clientpositive/spark/join9.q.out +++ b/ql/src/test/results/clientpositive/spark/join9.q.out @@ -85,17 +85,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: false predicate: key is not null (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: 1 value expressions: value (type: string) auto parallelism: true @@ -152,17 +149,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: false predicate: key is not null (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: 0 auto parallelism: true Path -> Alias: @@ -226,17 +220,14 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} outputColumnNames: _col0, _col8 - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col8 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 1 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git a/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out b/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out index 44d7f46..b61aa4c 100644 --- a/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out +++ b/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out @@ -39,23 +39,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: value (type: int) sort order: + Map-reduce partition columns: value (type: int) - Statistics: 
Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: int) Map 3 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: int) sort order: + Map-reduce partition columns: key (type: int) - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: int) Reducer 2 Reduce Operator Tree: @@ -67,14 +63,11 @@ STAGE PLANS: 1 {VALUE._col0} {KEY.reducesinkkey0} nullSafes: [true] outputColumnNames: _col0, _col1, _col5, _col6 - Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -124,43 +117,34 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: value is not null (type: boolean) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: value (type: int) sort order: + Map-reduce partition columns: value (type: int) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: int) Map 3 Map Operator Tree: TableScan alias: c - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: int) sort order: + Map-reduce partition columns: key (type: int) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: int) Map 4 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: int) sort order: + Map-reduce partition columns: key (type: int) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: int) Reducer 2 Reduce Operator Tree: @@ -173,14 +157,11 @@ STAGE PLANS: 1 {VALUE._col0} {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} {VALUE._col0} outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 - Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ 
-221,34 +202,28 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: value (type: int) sort order: + Map-reduce partition columns: value (type: int) - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: int) Map 3 Map Operator Tree: TableScan alias: c - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: int) sort order: + Map-reduce partition columns: key (type: int) - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: int) Map 4 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: int) sort order: + Map-reduce partition columns: key (type: int) - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: int) Reducer 2 Reduce Operator Tree: @@ -262,14 +237,11 @@ STAGE PLANS: 2 {KEY.reducesinkkey0} {VALUE._col0} nullSafes: [true] outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 - Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -337,41 +309,32 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: value (type: int), key (type: int) sort order: ++ Map-reduce partition columns: value (type: int), key (type: int) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE Map 3 Map Operator Tree: TableScan alias: c - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: value is not null (type: boolean) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: int), value (type: int) sort order: ++ Map-reduce partition columns: key (type: int), value (type: int) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: value is not null (type: boolean) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: int), value (type: int) sort order: ++ Map-reduce partition columns: key (type: int), value (type: int) - Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Join Operator @@ -384,14 +347,11 @@ STAGE PLANS: 2 {KEY.reducesinkkey0} 
{KEY.reducesinkkey1} nullSafes: [true, false] outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 - Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -432,32 +392,26 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: value (type: int), key (type: int) sort order: ++ Map-reduce partition columns: value (type: int), key (type: int) - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Map 3 Map Operator Tree: TableScan alias: c - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: int), value (type: int) sort order: ++ Map-reduce partition columns: key (type: int), value (type: int) - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: int), value (type: int) sort order: ++ Map-reduce partition columns: key (type: int), value (type: int) - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Join Operator @@ -470,14 +424,11 @@ STAGE PLANS: 2 {KEY.reducesinkkey0} {KEY.reducesinkkey1} nullSafes: [true, true] outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 - Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1573,27 +1524,21 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: value is null (type: boolean) - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: null (type: void) sort order: + - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: int) Map 3 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is null (type: boolean) - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: null (type: void) sort order: + - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE value expressions: value (type: int) Reducer 2 Reduce Operator Tree: @@ -1605,14 +1550,11 @@ STAGE PLANS: 1 {VALUE._col0} nullSafes: [true] outputColumnNames: _col1, _col5 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: null (type: void), _col1 (type: int), _col5 (type: int), null (type: void) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out index ec8a377..61c85e6 100644 --- a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out +++ b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out @@ -27,15 +27,12 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 value expressions: _col1 (type: string) Reducer 2 @@ -43,13 +40,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -110,15 +104,12 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: - - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 value expressions: _col1 (type: string) Reducer 2 @@ -126,13 +117,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -193,22 +181,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 
5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string), key (type: string) outputColumnNames: value, key - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum((key + 1)) keys: value (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 value expressions: _col1 (type: double) Reducer 2 @@ -218,17 +202,13 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -291,22 +271,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string), key (type: string) outputColumnNames: value, key - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg((key + 1)) keys: value (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 value expressions: _col1 (type: struct) Reducer 2 @@ -316,17 +292,13 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -389,21 +361,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cdouble (type: double) outputColumnNames: cdouble - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: cdouble (type: double) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 12288 Data size: 377237 Basic 
stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: double) sort order: + Map-reduce partition columns: _col0 (type: double) - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 Reducer 2 Reduce Operator Tree: @@ -411,17 +379,13 @@ STAGE PLANS: keys: KEY._col0 (type: double) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: double) outputColumnNames: _col0 - Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 - Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -482,22 +446,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cdouble (type: double) outputColumnNames: ctinyint, cdouble - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(DISTINCT cdouble) keys: ctinyint (type: tinyint), cdouble (type: double) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: tinyint), _col1 (type: double) sort order: ++ Map-reduce partition columns: _col0 (type: tinyint) - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 Reducer 2 Reduce Operator Tree: @@ -506,17 +466,13 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: tinyint), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 - Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -579,22 +535,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string) outputColumnNames: ctinyint, cstring1, cstring2 - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(DISTINCT cstring1), count(DISTINCT cstring2) keys: ctinyint (type: tinyint), cstring1 (type: string), cstring2 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: tinyint), _col1 (type: string), _col2 
(type: string) sort order: +++ Map-reduce partition columns: _col0 (type: tinyint) - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 Reducer 2 Reduce Operator Tree: @@ -603,17 +555,13 @@ STAGE PLANS: keys: KEY._col0 (type: tinyint) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: tinyint), _col1 (type: bigint), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 - Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 600 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -673,9 +621,11 @@ STAGE PLANS: PREHOOK: query: select key,value from src order by key limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@src #### A masked pattern was here #### POSTHOOK: query: select key,value from src order by key limit 0 POSTHOOK: type: QUERY +POSTHOOK: Input: default@src #### A masked pattern was here #### PREHOOK: query: -- 2MR (applied to last RS) explain @@ -701,22 +651,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: value (type: string), key (type: string) outputColumnNames: value, key - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(key) keys: value (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double) Reducer 2 Reduce Operator Tree: @@ -725,15 +671,12 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col1 (type: double) sort order: + - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 value expressions: _col0 (type: string) Reducer 3 @@ -741,13 +684,10 @@ STAGE PLANS: Select Operator expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -821,44 +761,36 @@ STAGE PLANS: Map Operator Tree: TableScan alias: 
src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 value expressions: _col1 (type: bigint) Reducer 2 @@ -868,22 +800,17 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 2 - Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col0 is not null (type: boolean) - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Reducer 3 Reduce Operator Tree: @@ -894,17 +821,13 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 4 - Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -916,17 +839,13 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - 
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 3 - Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 value expressions: _col0 (type: string), _col1 (type: bigint) Reducer 6 @@ -934,18 +853,14 @@ STAGE PLANS: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 3 - Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col0 is not null (type: boolean) - Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2 Data size: 20 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Stage: Stage-0 @@ -977,16 +892,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: value (type: string) sort order: + Map-reduce partition columns: value (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 0.3 value expressions: key (type: string) Reducer 2 @@ -996,17 +908,13 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: complete outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 20 - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 20 Data size: 200 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1069,15 +977,12 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 2.0E-5 value expressions: _col1 (type: string) Reducer 2 @@ -1085,13 +990,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string), VALUE._col0 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 500 
Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1234,16 +1136,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: concat(key, value, value, value, value, value, value, value, value, value) (type: string) sort order: + Map-reduce partition columns: concat(key, value, value, value, value, value, value, value, value, value) (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE TopN Hash Memory Usage: 2.0E-5 value expressions: key (type: string) Reducer 2 @@ -1253,17 +1152,13 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: complete outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: double) outputColumnNames: _col0 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out index ecafd37..3b669fc 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out @@ -75,7 +75,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -85,8 +84,6 @@ STAGE PLANS: Stage: Stage-4 Spark - Edges: - Reducer 4 <- Map 1 (GROUP SORT) #### A masked pattern was here #### Vertices: Map 1 @@ -94,29 +91,16 @@ STAGE PLANS: TableScan Filter Operator predicate: (ds <= '2008-04-08') (type: boolean) - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string), _col3 (type: string) - sort order: ++ - Map-reduce partition columns: _col2 (type: string), _col3 (type: string) - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) - Reducer 4 - Reduce Operator Tree: - Extract - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - File Output Operator - 
compressed: false - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part1 Stage: Stage-3 Dependency Collection @@ -155,8 +139,6 @@ STAGE PLANS: Stage: Stage-5 Spark - Edges: - Reducer 5 <- Map 2 (GROUP SORT) #### A masked pattern was here #### Vertices: Map 2 @@ -164,29 +146,16 @@ STAGE PLANS: TableScan Filter Operator predicate: (ds > '2008-04-08') (type: boolean) - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Reducer 5 - Reduce Operator Tree: - Extract - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part2 PREHOOK: query: from srcpart insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out index 35da478..bca90d8 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out @@ -55,37 +55,22 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-1 Spark - Edges: - Reducer 2 <- Map 1 (GROUP SORT) #### A masked pattern was here #### Vertices: Map 1 Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Reducer 2 - 
Reduce Operator Tree: - Extract - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part10 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part10 Stage: Stage-2 Dependency Collection diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out index 4713d51..ecd3871 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out @@ -64,59 +64,46 @@ STAGE PLANS: Spark Edges: Reducer 2 <- Map 1 (GROUP) - Reducer 4 <- Union 3 (GROUP SORT) - Reducer 6 <- Map 5 (GROUP) - Reducer 8 <- Map 7 (GROUP) - Union 3 <- Reducer 2 (NONE), Reducer 6 (NONE), Reducer 8 (NONE) + Reducer 5 <- Map 4 (GROUP) + Reducer 7 <- Map 6 (GROUP) + Union 3 <- Reducer 2 (NONE), Reducer 5 (NONE), Reducer 7 (NONE) #### A masked pattern was here #### Vertices: Map 1 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: 'k2' (type: string), '' (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 85000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 2 - Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: string), _col1 (type: string) - Map 5 + Map 4 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: 'k3' (type: string), ' ' (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 85500 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 2 - Statistics: Num rows: 2 Data size: 342 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 2 Data size: 342 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: string), _col1 (type: string) - Map 7 + Map 6 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: 'k1' (type: string), UDFToString(null) (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 85000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 2 - Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: @@ -128,24 +115,14 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: 
_col0, _col1 - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - value expressions: _col0 (type: string), _col1 (type: string) - Reducer 4 - Reduce Operator Tree: - Extract - Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 6 Data size: 1026 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part14 - Reducer 6 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part14 + Reducer 5 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) @@ -155,12 +132,14 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - value expressions: _col0 (type: string), _col1 (type: string) - Reducer 8 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part14 + Reducer 7 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) @@ -170,11 +149,13 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) - value expressions: _col0 (type: string), _col1 (type: string) + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part14 Union 3 Vertex: Union 3 diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out index 52e147c..e5773b4 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out @@ -55,23 +55,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) Reducer 2 Reduce Operator Tree: Extract - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: 
NONE
                 File Output Operator
                   compressed: false
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   table:
                       input format: org.apache.hadoop.mapred.TextInputFormat
                       output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out
index 35e948a..a596554 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out
@@ -53,37 +53,22 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
-      Edges:
-        Reducer 2 <- Map 1 (GROUP SORT)
 #### A masked pattern was here ####
       Vertices:
         Map 1
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                     outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col2 (type: string), _col3 (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
-                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-        Reducer 2
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part3
+                    File Output Operator
+                      compressed: false
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.nzhang_part3
   Stage: Stage-2
     Dependency Collection
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out
index cea414c..884acc3 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out
@@ -64,37 +64,22 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
-      Edges:
-        Reducer 2 <- Map 1 (GROUP SORT)
 #### A masked pattern was here ####
       Vertices:
         Map 1
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                     outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col2 (type: string), _col3 (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
-                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-        Reducer 2
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part4
+                    File Output Operator
+                      compressed: false
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.nzhang_part4
   Stage: Stage-2
     Dependency Collection
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out
index 8c5aa62..117cfa1 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part5.q.out
@@ -36,37 +36,22 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
-      Edges:
-        Reducer 2 <- Map 1 (GROUP SORT)
 #### A masked pattern was here ####
       Vertices:
         Map 1
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col1 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col1 (type: string)
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: string), _col1 (type: string)
-        Reducer 2
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part5
+                    File Output Operator
+                      compressed: false
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.nzhang_part5
   Stage: Stage-2
     Dependency Collection
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
index d626d9c..50c052d 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out
@@ -130,7 +130,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   GatherStats: false
                   File Output Operator
                     compressed: false
@@ -344,8 +343,6 @@
   Stage: Stage-4
     Spark
-      Edges:
-        Reducer 4 <- Map 1 (GROUP SORT)
 #### A masked pattern was here ####
       Vertices:
         Map 1
@@ -355,19 +352,36 @@ STAGE PLANS:
                   Filter Operator
                     isSamplingPred: false
                     predicate: (ds <= '2008-04-08') (type: boolean)
-                    Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                       outputColumnNames: _col0, _col1, _col2, _col3
-                      Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col2 (type: string), _col3 (type: string)
-                        sort order: ++
-                        Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
-                        Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                        tag: -1
-                        value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-                        auto parallelism: true
+                      File Output Operator
+                        compressed: false
+                        GlobalTableId: 1
+#### A masked pattern was here ####
+                        NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            properties:
+                              bucket_count -1
+                              columns key,value
+                              columns.comments defaultdefault
+                              columns.types string:string
+#### A masked pattern was here ####
+                              name default.nzhang_part8
+                              partition_columns ds/hr
+                              partition_columns.types string:string
+                              serialization.ddl struct nzhang_part8 { string key, string value}
+                              serialization.format 1
+                              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.nzhang_part8
+                        TotalFiles: 1
+                        GatherStats: true
+                        MultiFileSpray: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -393,39 +407,6 @@ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             Truncated Path -> Alias:
 #### A masked pattern was here ####
-        Reducer 4
-            Needs Tagging: false
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 1
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        bucket_count -1
-                        columns key,value
-                        columns.comments defaultdefault
-                        columns.types string:string
-#### A masked pattern was here ####
-                        name default.nzhang_part8
-                        partition_columns ds/hr
-                        partition_columns.types string:string
-                        serialization.ddl struct nzhang_part8 { string key, string value}
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part8
-                  TotalFiles: 1
-                  GatherStats: true
-                  MultiFileSpray: false
   Stage: Stage-3
     Dependency Collection
@@ -494,8 +475,6 @@
   Stage: Stage-5
     Spark
-      Edges:
-        Reducer 5 <- Map 2 (GROUP SORT)
 #### A masked pattern was here ####
       Vertices:
         Map 2
@@ -505,19 +484,37 @@ STAGE PLANS:
                   Filter Operator
                     isSamplingPred: false
                     predicate: (ds > '2008-04-08') (type: boolean)
-                    Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string), hr (type: string)
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                      Reduce Output Operator
-                        key expressions: _col2 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col2 (type: string)
-                        Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                        tag: -1
-                        value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-                        auto parallelism: true
+                      File Output Operator
+                        compressed: false
+                        GlobalTableId: 2
+#### A masked pattern was here ####
+                        NumFilesPerFileSink: 1
+                        Static Partition Specification: ds=2008-12-31/
+#### A masked pattern was here ####
+                        table:
+                            input format: org.apache.hadoop.mapred.TextInputFormat
+                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                            properties:
+                              bucket_count -1
+                              columns key,value
+                              columns.comments defaultdefault
+                              columns.types string:string
+#### A masked pattern was here ####
+                              name default.nzhang_part8
+                              partition_columns ds/hr
+                              partition_columns.types string:string
+                              serialization.ddl struct nzhang_part8 { string key, string value}
+                              serialization.format 1
+                              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                            name: default.nzhang_part8
+                        TotalFiles: 1
+                        GatherStats: true
+                        MultiFileSpray: false
             Path -> Alias:
 #### A masked pattern was here ####
             Path -> Partition:
@@ -543,40 +540,6 @@ serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
             Truncated Path -> Alias:
 #### A masked pattern was here ####
-        Reducer 5
-            Needs Tagging: false
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  GlobalTableId: 2
-#### A masked pattern was here ####
-                  NumFilesPerFileSink: 1
-                  Static Partition Specification: ds=2008-12-31/
-                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      properties:
-                        bucket_count -1
-                        columns key,value
-                        columns.comments defaultdefault
-                        columns.types string:string
-#### A masked pattern was here ####
-                        name default.nzhang_part8
-                        partition_columns ds/hr
-                        partition_columns.types string:string
-                        serialization.ddl struct nzhang_part8 { string key, string value}
-                        serialization.format 1
-                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-#### A masked pattern was here ####
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part8
-                  TotalFiles: 1
-                  GatherStats: true
-                  MultiFileSpray: false
 PREHOOK: query: from srcpart insert overwrite table nzhang_part8 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08'
diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out
index 5f712a8..bab435d 100644
--- a/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out
+++ b/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out
@@ -55,37 +55,22 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Spark
-      Edges:
-        Reducer 2 <- Map 1 (GROUP SORT)
 #### A masked pattern was here ####
       Vertices:
         Map 1
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                     outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col2 (type: string), _col3 (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
-                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-        Reducer 2
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part9
+                    File Output Operator
+                      compressed: false
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.nzhang_part9
   Stage: Stage-2
     Dependency Collection
diff --git a/ql/src/test/results/clientpositive/spark/mapreduce1.q.out b/ql/src/test/results/clientpositive/spark/mapreduce1.q.out
index 6fb8529..142616f 100644
--- a/ql/src/test/results/clientpositive/spark/mapreduce1.q.out
+++ b/ql/src/test/results/clientpositive/spark/mapreduce1.q.out
@@ -39,33 +39,27 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), UDFToInteger((key / 10)) (type: int), UDFToInteger((key % 10)) (type: int), value (type: string)
                     outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Transform Operator
                       command: cat
                       output info:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col1 (type: string), _col2 (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col3 (type: string), _col0 (type: string)
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
         Reducer 2
             Reduce Operator Tree:
              Select Operator
                expressions: UDFToInteger(VALUE._col0) (type: int), UDFToInteger(VALUE._col1) (type: int), UDFToInteger(VALUE._col2) (type: int), VALUE._col3 (type: string)
                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/mapreduce2.q.out b/ql/src/test/results/clientpositive/spark/mapreduce2.q.out
index f8c3700..fa3bdd7 100644
--- a/ql/src/test/results/clientpositive/spark/mapreduce2.q.out
+++ b/ql/src/test/results/clientpositive/spark/mapreduce2.q.out
@@ -37,32 +37,26 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string), UDFToInteger((key / 10)) (type: int), UDFToInteger((key % 10)) (type: int), value (type: string)
                     outputColumnNames: _col0, _col1, _col2, _col3
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Transform Operator
                       command: cat
                       output info:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                           serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
                         Map-reduce partition columns: _col3 (type: string), _col0 (type: string)
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
         Reducer 2
             Reduce Operator Tree:
              Select Operator
                expressions: UDFToInteger(VALUE._col0) (type: int), UDFToInteger(VALUE._col1) (type: int), UDFToInteger(VALUE._col2) (type: int), VALUE._col3 (type: string)
                outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/merge1.q.out b/ql/src/test/results/clientpositive/spark/merge1.q.out
index 5384b8f..da89a2a 100644
--- a/ql/src/test/results/clientpositive/spark/merge1.q.out
+++ b/ql/src/test/results/clientpositive/spark/merge1.q.out
@@ -40,22 +40,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2
             Reduce Operator Tree:
@@ -64,14 +60,11 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -541,14 +534,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test_src
-                  Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -656,14 +646,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test_src
-                  Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/merge2.q.out b/ql/src/test/results/clientpositive/spark/merge2.q.out
index bbedb65..e98a77e 100644
--- a/ql/src/test/results/clientpositive/spark/merge2.q.out
+++ b/ql/src/test/results/clientpositive/spark/merge2.q.out
@@ -40,22 +40,18 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: key
-                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(1)
                       keys: key (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col1 (type: bigint)
         Reducer 2
             Reduce Operator Tree:
@@ -64,14 +60,11 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), UDFToInteger(_col1) (type: int)
                  outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -541,14 +534,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test_src
-                  Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -656,14 +646,11 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test_src
-                  Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: key (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out b/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
index fbbb1d1..834d992 100644
--- a/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
+++ b/ql/src/test/results/clientpositive/spark/metadata_only_queries.q.out
@@ -197,19 +197,15 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: stats_tbl
-                  Statistics: Num rows: 9999 Data size: 1030908 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: s (type: string), bo (type: boolean), bin (type: binary), si (type: smallint), i (type: int), b (type: bigint)
                     outputColumnNames: s, bo, bin, si, i, b
-                    Statistics: Num rows: 9999 Data size: 1030908 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                      Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: double), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: int), _col9 (type: bigint)
         Reducer 2
             Reduce Operator Tree:
@@ -217,14 +213,11 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2), count(VALUE._col3), count(VALUE._col4), count(VALUE._col5), count(VALUE._col6), count(VALUE._col7), max(VALUE._col8), min(VALUE._col9)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: double), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: int), _col9 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                  Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                    Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -257,19 +250,15 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: stats_tbl_part
-                  Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: s (type: string), bo (type: boolean), bin (type: binary), si (type: smallint), i (type: int), b (type: bigint)
                     outputColumnNames: s, bo, bin, si, i, b
-                    Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si), max(i), min(b)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                      Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: double), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: int), _col9 (type: bigint)
         Reducer 2
             Reduce Operator Tree:
@@ -277,14 +266,11 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0), sum(VALUE._col1), sum(VALUE._col2), count(VALUE._col3), count(VALUE._col4), count(VALUE._col5), count(VALUE._col6), count(VALUE._col7), max(VALUE._col8), min(VALUE._col9)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: double), _col3 (type: bigint), _col4 (type: bigint), _col5 (type: bigint), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: int), _col9 (type: bigint)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-                  Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                    Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -352,9 +338,11 @@
 PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 9999 9999 1999.8 9999 9999 9999 9999 9999
 PREHOOK: query: explain
@@ -375,9 +363,11 @@
 PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 65536 65791 4294967296 4294967551 0.009999999776482582 99.9800033569336 0.01 50.0
 PREHOOK: query: explain
@@ -398,9 +388,11 @@
 PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 9489 9489 1897.8 9489 9489 9489 9489 9489
 PREHOOK: query: explain
@@ -421,9 +413,11 @@
 PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 65536 65791 4294967296 4294967551 0.009999999776482582 99.9800033569336 0.01 50.0
 PREHOOK: query: explain select count(ts) from stats_tbl_part
@@ -445,19 +439,15 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: stats_tbl_part
-                  Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ts (type: timestamp)
                     outputColumnNames: ts
-                    Statistics: Num rows: 9489 Data size: 978785 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: count(ts)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: bigint)
         Reducer 2
             Reduce Operator Tree:
@@ -465,14 +455,11 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: bigint)
                  outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out b/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out
index 664e065..5be958f 100644
--- a/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out
+++ b/ql/src/test/results/clientpositive/spark/metadata_only_queries_with_filters.q.out
@@ -160,9 +160,11 @@
 PREHOOK: query: select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), count(1), sum(1), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt = 2010
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 2322 2322 2322 2322 2322 2322 2322 65791 4294967296 99.9800033569336 0.03
 PREHOOK: query: explain
@@ -183,16 +185,20 @@
 PREHOOK: query: select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), count(1), sum(1), sum(2), count(s), count(bo), count(bin), count(si), max(i), min(b), max(f), min(d) from stats_tbl_part where dt > 2010
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 2219 2219 2219 4438 2219 2219 2219 2219 65791 4294967296 99.95999908447266 0.04
 PREHOOK: query: select count(*) from stats_tbl_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from stats_tbl_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 4541
 PREHOOK: query: select count(*)/2 from stats_tbl_part
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert.q.out b/ql/src/test/results/clientpositive/spark/multi_insert.q.out
index a503967..31ebbeb 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert.q.out
@@ -47,7 +47,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     table:
@@ -64,14 +63,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -116,14 +112,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((key > 10) and (key < 20)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -214,7 +207,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     table:
@@ -231,14 +223,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -283,14 +272,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((key > 10) and (key < 20)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -381,7 +367,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     table:
@@ -398,14 +383,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -450,14 +432,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((key > 10) and (key < 20)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -548,7 +527,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     table:
@@ -565,14 +543,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key < 10) (type: boolean)
-                    Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -617,14 +592,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((key > 10) and (key < 20)) (type: boolean)
-                    Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -715,38 +687,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: key, value
-                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: key (type: string), value (type: string)
                         sort order: ++
                         Map-reduce partition columns: key (type: string), value (type: string)
-                        Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
                 predicate: (KEY._col0 < 10) (type: boolean)
-                Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   keys: KEY._col0 (type: string), KEY._col1 (type: string)
                   mode: complete
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -754,19 +717,15 @@ STAGE PLANS:
                           name: default.src_multi1
               Filter Operator
                 predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-                Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   keys: KEY._col0 (type: string), KEY._col1 (type: string)
                   mode: complete
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -879,38 +838,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: key, value
-                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: key (type: string), value (type: string)
                         sort order: ++
                         Map-reduce partition columns: key (type: string), value (type: string)
-                        Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
                 predicate: (KEY._col0 < 10) (type: boolean)
-                Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   keys: KEY._col0 (type: string), KEY._col1 (type: string)
                   mode: complete
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -918,19 +868,15 @@ STAGE PLANS:
                           name: default.src_multi1
               Filter Operator
                 predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-                Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   keys: KEY._col0 (type: string), KEY._col1 (type: string)
                   mode: complete
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1043,38 +989,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: key, value
-                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: key (type: string), value (type: string)
                         sort order: ++
                         Map-reduce partition columns: key (type: string), value (type: string)
-                        Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
                 predicate: (KEY._col0 < 10) (type: boolean)
-                Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   keys: KEY._col0 (type: string), KEY._col1 (type: string)
                   mode: complete
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1082,19 +1019,15 @@ STAGE PLANS:
                           name: default.src_multi1
               Filter Operator
                 predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-                Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   keys: KEY._col0 (type: string), KEY._col1 (type: string)
                   mode: complete
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1207,38 +1140,29 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: key (type: string), value (type: string)
                       outputColumnNames: key, value
-                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: key (type: string), value (type: string)
                         sort order: ++
                         Map-reduce partition columns: key (type: string), value (type: string)
-                        Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
              Forward
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Filter Operator
                 predicate: (KEY._col0 < 10) (type: boolean)
-                Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   keys: KEY._col0 (type: string), KEY._col1 (type: string)
                   mode: complete
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1246,19 +1170,15 @@ STAGE PLANS:
                           name: default.src_multi1
               Filter Operator
                 predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-                Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   keys: KEY._col0 (type: string), KEY._col1 (type: string)
                   mode: complete
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string), _col1 (type: string)
                     outputColumnNames: _col0, _col1
-                    Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                     File Output Operator
                       compressed: false
-                      Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                       table:
                           input format: org.apache.hadoop.mapred.TextInputFormat
                           output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1407,14 +1327,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (_col0 < 10) (type: boolean)
-                    Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1459,14 +1376,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
-                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1612,14 +1526,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (_col0 < 10) (type: boolean)
-                    Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1664,14 +1575,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
-                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1817,14 +1725,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (_col0 < 10) (type: boolean)
-                    Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1869,14 +1774,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
-                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2022,14 +1924,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (_col0 < 10) (type: boolean)
-                    Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2074,14 +1973,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
-                    Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2190,7 +2086,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     table:
@@ -2207,14 +2102,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key = 0) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: '0' (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2238,14 +2130,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key = 2) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: '2' (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2266,14 +2155,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key = 4) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: '4' (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2324,7 +2210,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     table:
@@ -2341,14 +2226,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key = 0) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: '0' (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2372,14 +2254,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key = 2) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: '2' (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2400,14 +2279,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key = 4) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: '4' (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2458,7 +2334,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     table:
@@ -2475,14 +2350,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key = 0) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: '0' (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2506,14 +2378,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key = 2) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: '2' (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2534,14 +2403,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key = 4) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: '4' (type: string), value (type: string)
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                       File Output Operator
                         compressed: false
-                        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                         table:
                             input format: org.apache.hadoop.mapred.TextInputFormat
                             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2592,7 +2458,6 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: src
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     table:
@@ -2609,14 +2474,11 @@ STAGE PLANS:
                 TableScan
                   Filter Operator
                     predicate: (key = 0) (type: boolean)
-                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: '0' (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2640,14 +2502,11 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: (key = 2) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: '2' (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2668,14 +2527,11 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: (key = 4) (type: boolean)
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: '4' (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
index f0a654e..2186728 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
@@ -51,39 +51,30 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key > 450) or (key > 500)) (type: boolean)
-            Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string)
               outputColumnNames: key
-              Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string)
                 sort order: +
                 Map-reduce partition columns: key (type: string)
-                Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
     Reducer 2
         Reduce Operator Tree:
           Forward
-            Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: (KEY._col0 > 450) (type: boolean)
-              Statistics: Num rows: 110 Data size: 1168 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: count()
                keys: KEY._col0 (type: string)
                mode: complete
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -91,20 +82,16 @@ STAGE PLANS:
                      name: default.e1
           Filter Operator
             predicate: (KEY._col0 > 500) (type: boolean)
-            Statistics: Num rows: 110 Data size: 1168 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count()
               keys: KEY._col0 (type: string)
               mode: complete
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -248,36 +235,28 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: key
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: key (type: string)
               sort order: +
               Map-reduce partition columns: key (type: string)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
     Reducer 2
         Reduce Operator Tree:
           Forward
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (KEY._col0 > 450) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count()
               keys: KEY._col0 (type: string)
               mode: complete
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -288,14 +267,11 @@ STAGE PLANS:
               keys: KEY._col0 (type: string)
               mode: complete
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out
index f6890fb..49f5898 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out
@@ -50,42 +50,33 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), UDFToDouble(key) (type: double)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: _col0 (type: string)
               sort order: +
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: double)
     Reducer 2
         Reduce Operator Tree:
           Select Operator
             expressions: VALUE._col0 (type: double)
             outputColumnNames: _col1
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               sort order:
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
              value expressions: _col1 (type: double)
     Reducer 3
         Reduce Operator Tree:
           Forward
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: count()
             mode: complete
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: UDFToInteger(_col0) (type: int)
              outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -95,14 +86,11 @@ STAGE PLANS:
             aggregations: percentile_approx(VALUE._col0, 0.5)
             mode: complete
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: double)
              outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
index 15c949a..0a983d8 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out
@@ -56,45 +56,36 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), UDFToDouble(key) (type: double), value (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: _col0 (type: string)
               sort order: +
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: double), _col2 (type: string)
     Reducer 2
         Reduce Operator Tree:
           Select Operator
             expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: double), VALUE._col1 (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: _col0 (type: string), _col2 (type: string)
               sort order: ++
               Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: double)
     Reducer 3
         Reduce Operator Tree:
           Forward
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: count(DISTINCT KEY._col1:0._col0)
             keys: KEY._col0 (type: string)
             mode: complete
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), UDFToDouble(_col1) (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -105,14 +96,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string), KEY._col1:0._col0 (type: string)
             mode: complete
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), _col2 (type: double), _col1 (type: string)
              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -182,45 +170,36 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), UDFToDouble(key) (type: double), value (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: _col0 (type: string)
               sort order: +
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: double), _col2 (type: string)
     Reducer 2
         Reduce Operator Tree:
           Select Operator
             expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: double), VALUE._col1 (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: _col0 (type: string), _col2 (type: string)
               sort order: ++
               Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: double)
     Reducer 3
         Reduce Operator Tree:
           Forward
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: count(DISTINCT KEY._col1:0._col0)
             keys: KEY._col0 (type: string)
             mode: complete
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), UDFToDouble(_col1) (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -231,14 +210,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string), KEY._col1:0._col0 (type: string)
             mode: complete
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), _col2 (type: double), _col1 (type: string)
              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1625,7 +1601,6 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -1645,18 +1620,15 @@ STAGE PLANS:
           Select Operator
             expressions: key (type: string), value (type: string)
             outputColumnNames: key, value
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count(DISTINCT value)
               keys: key (type: string), value (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
     Reducer 4
         Reduce Operator Tree:
           Group By Operator
@@ -1664,14 +1636,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string)
             mode: mergepartial
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), UDFToDouble(_col1) (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1719,18 +1688,15 @@ STAGE PLANS:
           Select Operator
             expressions: value (type: string), key (type: string)
             outputColumnNames: value, key
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count(DISTINCT key)
               keys: value (type: string), key (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
     Reducer 5
         Reduce Operator Tree:
           Group By Operator
@@ -1738,14 +1704,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string)
             mode: mergepartial
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), UDFToDouble(_col1) (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1793,23 +1756,19 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string), UDFToDouble(key) (type: double), value (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: string)
               sort order: ++++
               Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: string)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: double), _col2 (type: string)
     Reducer 4
         Reduce Operator Tree:
           Select Operator
             expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: double), VALUE._col1 (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
               table:
@@ -1829,30 +1788,24 @@ STAGE PLANS:
           Select Operator
             expressions: _col0 (type: string), _col1 (type: double), _col2 (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Reduce Output Operator
               key expressions: _col0 (type: string), _col2 (type: string)
               sort order: ++
               Map-reduce partition columns: _col0 (type: string)
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               value expressions: _col1 (type: double)
     Reducer 5
         Reduce Operator Tree:
           Forward
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Group By Operator
             aggregations: count(DISTINCT KEY._col1:0._col0)
             keys: KEY._col0 (type: string)
             mode: complete
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), UDFToDouble(_col1) (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1863,14 +1816,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string), KEY._col1:0._col0 (type: string)
             mode: complete
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), _col2 (type: double), _col1 (type: string)
              outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1929,20 +1879,16 @@ STAGE PLANS:
           Select Operator
             expressions: _col0 (type: string), _col1 (type: double), _col2 (type: string)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               aggregations: count(DISTINCT _col1)
               keys: _col0 (type: string), _col1 (type: double), _col2 (type: string)
               mode: complete
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), UDFToDouble(_col3) (type: double)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
index b502d27..68b1312 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
@@ -76,7 +76,6 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src_10
-          Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -92,21 +91,16 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           Lateral View Forward
-            Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: key (type: string)
              outputColumnNames: key
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
             Lateral View Join Operator
               outputColumnNames: _col0, _col5
-              Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col5 (type: double)
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -115,20 +109,15 @@ STAGE PLANS:
            Select Operator
              expressions: array((key + 1),(key + 2)) (type: array)
              outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
             UDTF Operator
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
               function name: explode
              Lateral View Join Operator
                outputColumnNames: _col0, _col5
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
               Select Operator
                 expressions: _col0 (type: string), _col5 (type: double)
                 outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                  Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -172,21 +161,16 @@ STAGE PLANS:
     Map Operator Tree:
        TableScan
          Lateral View Forward
-            Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: key
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
            Lateral View Join Operator
              outputColumnNames: _col0, _col5
-               Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string), _col5 (type: double)
               outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-                 Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -195,20 +179,15 @@ STAGE PLANS:
           Select Operator
             expressions: array((key + 3),(key + 4)) (type: array)
             outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
            UDTF Operator
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
              function name: explode
             Lateral View Join Operator
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col5 (type: double)
                outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                  Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -330,7 +309,6 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src_10
-          Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -348,55 +326,43 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           Lateral View Forward
-            Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: key
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
            Lateral View Join Operator
              outputColumnNames: _col0, _col5
-              Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string), _col5 (type: double)
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: sum(_col5)
                keys: _col0 (type: string)
                mode: hash
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string)
                  sort order: +
                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                  value expressions: _col1 (type: double)
           Select Operator
             expressions: array((key + 1),(key + 2)) (type: array)
             outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
            UDTF Operator
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
              function name: explode
             Lateral View Join Operator
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col5 (type: double)
                outputColumnNames: _col0, _col5
-                 Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(_col5)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: double)
     Reducer 4
         Reduce Operator Tree:
@@ -405,14 +371,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string)
             mode: mergepartial
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), _col1 (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -458,55 +421,43 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           Lateral View Forward
-            Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: key
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
            Lateral View Join Operator
              outputColumnNames: _col0, _col5
-              Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: _col0 (type: string), _col5 (type: double)
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
              Group By Operator
                aggregations: sum(_col5)
                keys: _col0 (type: string)
                mode: hash
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string)
                  sort order: +
                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                  value expressions: _col1 (type: double)
           Select Operator
             expressions: array((key + 3),(key + 4)) (type: array)
             outputColumnNames: _col0
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
            UDTF Operator
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
              function name: explode
             Lateral View Join Operator
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col5 (type: double)
                outputColumnNames: _col0, _col5
-                 Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: sum(_col5)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col1 (type: double)
     Reducer 5
         Reduce Operator Tree:
@@ -515,14 +466,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string)
             mode: mergepartial
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), _col1 (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -628,7 +576,6 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src_10
-          Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -646,55 +593,43 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           Lateral View Forward
-            Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: key
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
            Lateral View Join Operator
              outputColumnNames: _col0, _col5
-              Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col0 (type: string), _col5 (type: double)
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
              Group By Operator
                aggregations: sum(_col5)
                keys: _col0 (type: string)
                mode: hash
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string)
                  sort order: +
                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                  value expressions: _col1 (type: double)
           Select Operator
             expressions: array((key + 1),(key + 2)) (type: array)
             outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
            UDTF Operator
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
              function name: explode
             Lateral View Join Operator
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col5 (type: double)
                outputColumnNames: _col0, _col5
-                 Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: sum(_col5)
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                   value expressions: _col1 (type: double)
     Reducer 4
         Reduce Operator Tree:
@@ -703,14 +638,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string)
             mode: mergepartial
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), _col1 (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -770,37 +702,29 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: ((key > 200) or (key < 200)) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: key, value
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string)
                 sort order: +
                 Map-reduce partition columns: key (type: string)
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 value expressions: value (type: string)
     Reducer 5
         Reduce Operator Tree:
           Forward
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           Filter Operator
             predicate: (KEY._col0 > 200) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Group By Operator
               aggregations: count(VALUE._col0)
               keys: KEY._col0 (type: string)
               mode: complete
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: bigint)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -808,20 +732,16 @@ STAGE PLANS:
                     name: default.src_lv2
           Filter Operator
             predicate: (KEY._col0 < 200) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Group By Operator
               aggregations: count(VALUE._col0)
               keys: KEY._col0 (type: string)
               mode: complete
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: bigint)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -947,7 +867,6 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src_10
-          Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -965,54 +884,42 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           Lateral View Forward
-            Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: key
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
            Lateral View Join Operator
              outputColumnNames: _col0, _col5
-              Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col5 (type: double), _col0 (type: string)
               outputColumnNames: _col5, _col0
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
              Group By Operator
                aggregations: sum(DISTINCT _col0)
                keys: _col5 (type: double), _col0 (type: string)
                mode: hash
                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: double), _col1 (type: string)
                  sort order: ++
                  Map-reduce partition columns: _col0 (type: double)
-                  Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: array((key + 1),(key + 2)) (type: array)
             outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
            UDTF Operator
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
              function name: explode
             Lateral View Join Operator
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: _col5 (type: double), _col0 (type: string)
                outputColumnNames: _col5, _col0
-                 Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: sum(DISTINCT _col0)
                 keys: _col5 (type: double), _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: double), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: double)
-                    Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
     Reducer 5
         Reduce Operator Tree:
           Group By Operator
@@ -1020,14 +927,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: double)
             mode: mergepartial
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Select Operator
              expressions: _col0 (type: double), _col1 (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1086,54 +990,42 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           Lateral View Forward
-            Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: key
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
            Lateral View Join Operator
              outputColumnNames: _col0, _col5
-              Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col5 (type: double), _col0 (type: string)
               outputColumnNames: _col5, _col0
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
              Group By Operator
                aggregations: sum(DISTINCT _col0)
                keys: _col5 (type: double), _col0 (type: string)
                mode: hash
                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: double), _col1 (type: string)
                  sort order: ++
                  Map-reduce partition columns: _col0 (type: double)
-                  Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: array((key + 3),(key + 4)) (type: array)
             outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
            UDTF Operator
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
              function name: explode
             Lateral View Join Operator
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: _col5 (type: double), _col0 (type: string)
                outputColumnNames: _col5, _col0
-                 Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: sum(DISTINCT _col0)
                 keys: _col5 (type: double), _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: double), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: double)
-                    Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
     Reducer 6
         Reduce Operator Tree:
           Group By Operator
@@ -1141,14 +1033,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: double)
             mode: mergepartial
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Select Operator
              expressions: _col0 (type: double), _col1 (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1167,18 +1056,15 @@ STAGE PLANS:
           Select Operator
             expressions: value (type: string), key (type: string)
             outputColumnNames: value, key
-            Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
             Group By Operator
               aggregations: sum(DISTINCT key)
               keys: value (type: string), key (type: string)
               mode: hash
               outputColumnNames: _col0, _col1, _col2
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
               Reduce Output Operator
                 key expressions: _col0 (type: string), _col1 (type: string)
                 sort order: ++
                 Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
     Reducer 7
         Reduce Operator Tree:
           Group By Operator
@@ -1186,14 +1072,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string)
             mode: mergepartial
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), _col1 (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1349,7 +1232,6 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src_10
-          Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -1367,54 +1249,42 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           Lateral View Forward
-            Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: key
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
            Lateral View Join Operator
              outputColumnNames: _col0, _col5
-              Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col0 (type: string), _col5 (type: double)
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
              Group By Operator
                aggregations: sum(DISTINCT _col5)
                keys: _col0 (type: string), _col5 (type: double)
                mode: hash
                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string), _col1 (type: double)
                  sort order: ++
                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: array((key + 1),(key + 2)) (type: array)
             outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
            UDTF Operator
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
              function name: explode
             Lateral View Join Operator
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col5 (type: double)
                outputColumnNames: _col0, _col5
-                 Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: sum(DISTINCT _col5)
                 keys: _col0 (type: string), _col5 (type: double)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: double)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
     Reducer 5
         Reduce Operator Tree:
           Group By Operator
@@ -1422,14 +1292,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string)
             mode: mergepartial
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), _col1 (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1501,54 +1368,42 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           Lateral View Forward
-            Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: key (type: string)
             outputColumnNames: key
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
            Lateral View Join Operator
              outputColumnNames: _col0, _col5
-              Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
             Select Operator
               expressions: _col0 (type: string), _col5 (type: double)
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
              Group By Operator
                aggregations: sum(DISTINCT _col5)
                keys: _col0 (type: string), _col5 (type: double)
                mode: hash
                outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: string), _col1 (type: double)
                  sort order: ++
                  Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
           Select Operator
             expressions: array((key + 3),(key + 4)) (type: array)
             outputColumnNames: _col0
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
            UDTF Operator
-              Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE
              function name: explode
             Lateral View Join Operator
               outputColumnNames: _col0, _col5
-                Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col5 (type: double)
                outputColumnNames: _col0, _col5
-                 Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
               Group By Operator
                 aggregations: sum(DISTINCT _col5)
                 keys: _col0 (type: string), _col5 (type: double)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                  Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: double)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 0 Data size: 228 Basic stats: PARTIAL Column stats: NONE
     Reducer 6
         Reduce Operator Tree:
           Group By Operator
@@ -1556,14 +1411,11 @@ STAGE PLANS:
             keys: KEY._col0 (type: string)
             mode: mergepartial
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Select Operator
              expressions: _col0 (type: string), _col1 (type: double)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1581,36 +1433,28 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: ((key > 200) or (key < 200)) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: key, value
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               Reduce Output Operator
                 key expressions: value (type: string), key (type: string)
                 sort order: ++
                 Map-reduce partition columns: value (type: string)
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
     Reducer 7
         Reduce Operator Tree:
           Forward
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
           Filter Operator
             predicate: (KEY._col1:0._col0 > 200) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Group By Operator
               aggregations: sum(DISTINCT KEY._col1:0._col0)
               keys: KEY._col0 (type: string)
               mode: complete
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: double)
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1618,20 +1462,16 @@ STAGE PLANS:
                     name: default.src_lv3
           Filter Operator
             predicate: (KEY._col1:0._col0 < 200) (type: boolean)
-            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
             Group By Operator
               aggregations: sum(DISTINCT KEY._col1:0._col0)
               keys: KEY._col0 (type: string)
               mode: complete
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: double)
                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
index 32eb45e..f7867ac 100644
--- a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
+++ b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
@@ -47,7 +47,6 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -64,14 +63,11 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: (key < 10) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -116,14 +112,11 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: ((key > 10) and (key < 20)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -214,7 +207,6 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -231,14 +223,11 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: (key < 10) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -283,14 +272,11 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: ((key > 10) and (key < 20)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -381,7 +367,6 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -398,14 +383,11 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: (key < 10) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -450,14 +432,11 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: ((key > 10) and (key < 20)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -548,7 +527,6 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           File Output Operator
             compressed: false
             table:
@@ -565,14 +543,11 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: (key < 10) (type: boolean)
-            Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -617,14 +592,11 @@ STAGE PLANS:
         TableScan
           Filter Operator
             predicate: ((key > 10) and (key < 20)) (type: boolean)
-            Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -715,38 +687,29 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: key, value
-              Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
                 sort order: ++
                 Map-reduce partition columns: key (type: string), value (type: string)
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
     Reducer 2
         Reduce Operator Tree:
           Forward
-            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (KEY._col0 < 10) (type: boolean)
-            Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               keys: KEY._col0 (type: string), KEY._col1 (type: string)
               mode: complete
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -754,19 +717,15 @@ STAGE PLANS:
                     name: default.src_multi1
           Filter Operator
             predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-            Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               keys: KEY._col0 (type: string), KEY._col1 (type: string)
               mode: complete
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -879,38 +838,29 @@ STAGE PLANS:
     Map Operator Tree:
         TableScan
           alias: src
-          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: key (type: string), value (type: string)
               outputColumnNames: key, value
-              Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
               Reduce Output Operator
                 key expressions: key (type: string), value (type: string)
                 sort order: ++
                 Map-reduce partition columns: key (type: string), value (type: string)
-                Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
     Reducer 2
         Reduce Operator Tree:
           Forward
-            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
             predicate: (KEY._col0 < 10) (type: boolean)
-            Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               keys: KEY._col0 (type: string), KEY._col1 (type: string)
               mode: complete
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -918,19 +868,15 @@ STAGE PLANS:
                     name: default.src_multi1
           Filter Operator
             predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-            Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
             Group By Operator
               keys: KEY._col0 (type: string), KEY._col1 (type: string)
               mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1043,38 +989,29 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: key, value
-               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: key (type: string), value (type: string)
                  sort order: ++
                  Map-reduce partition columns: key (type: string), value (type: string)
-                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
      Reducer 2
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
                predicate: (KEY._col0 < 10) (type: boolean)
-               Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1082,19 +1019,15 @@ STAGE PLANS:
                          name: default.src_multi1
              Filter Operator
                predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1207,38 +1140,29 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: key, value
-               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: key (type: string), value (type: string)
                  sort order: ++
                  Map-reduce partition columns: key (type: string), value (type: string)
-                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
      Reducer 2
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
                predicate: (KEY._col0 < 10) (type: boolean)
-               Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1246,19 +1170,15 @@ STAGE PLANS:
                          name: default.src_multi1
              Filter Operator
                predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1407,14 +1327,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (_col0 < 10) (type: boolean)
-             Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1459,14 +1376,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
-             Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1612,14 +1526,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (_col0 < 10) (type: boolean)
-             Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1664,14 +1575,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
-             Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1817,14 +1725,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (_col0 < 10) (type: boolean)
-             Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1869,14 +1774,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
-             Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2022,14 +1924,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (_col0 < 10) (type: boolean)
-             Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2074,14 +1973,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean)
-             Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: _col0 (type: string), _col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2190,7 +2086,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -2207,14 +2102,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 0) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '0' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2238,14 +2130,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 2) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '2' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2266,14 +2155,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 4) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '4' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2324,7 +2210,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -2341,14 +2226,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 0) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '0' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2372,14 +2254,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 2) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '2' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2400,14 +2279,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 4) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '4' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2458,7 +2334,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -2475,14 +2350,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 0) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '0' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2506,14 +2378,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 2) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '2' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2534,14 +2403,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 4) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '4' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2592,7 +2458,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -2609,14 +2474,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 0) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '0' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2640,14 +2502,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 2) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '2' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2668,14 +2527,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key = 4) (type: boolean)
-             Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: '4' (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2726,68 +2582,53 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: key, value
-               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: key (type: string), value (type: string)
                  sort order: ++
                  Map-reduce partition columns: key (type: string), value (type: string)
-                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
      Reducer 2
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
                predicate: (KEY._col0 < 10) (type: boolean)
-               Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col0 (type: string)
-                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
              Filter Operator
                predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col1 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col1 (type: string)
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col0 (type: string)
      Reducer 3
          Reduce Operator Tree:
            Select Operator
              expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2797,10 +2638,8 @@ STAGE PLANS:
            Select Operator
              expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2860,68 +2699,53 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: key, value
-               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: key (type: string), value (type: string)
                  sort order: ++
                  Map-reduce partition columns: key (type: string), value (type: string)
-                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
      Reducer 2
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
                predicate: (KEY._col0 < 10) (type: boolean)
-               Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col0 (type: string)
-                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
              Filter Operator
                predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col1 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col1 (type: string)
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col0 (type: string)
      Reducer 3
          Reduce Operator Tree:
            Select Operator
              expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2931,10 +2755,8 @@ STAGE PLANS:
            Select Operator
              expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -2994,68 +2816,53 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: key, value
-               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: key (type: string), value (type: string)
                  sort order: ++
                  Map-reduce partition columns: key (type: string), value (type: string)
-                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
      Reducer 2
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
                predicate: (KEY._col0 < 10) (type: boolean)
-               Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col0 (type: string)
-                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
              Filter Operator
                predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col1 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col1 (type: string)
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col0 (type: string)
      Reducer 3
          Reduce Operator Tree:
            Select Operator
              expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3065,10 +2872,8 @@ STAGE PLANS:
            Select Operator
              expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3128,68 +2933,53 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: key, value
-               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: key (type: string), value (type: string)
                  sort order: ++
                  Map-reduce partition columns: key (type: string), value (type: string)
-                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
      Reducer 2
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
                predicate: (KEY._col0 < 10) (type: boolean)
-               Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col0 (type: string)
-                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
              Filter Operator
                predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col1 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col1 (type: string)
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col0 (type: string)
      Reducer 3
          Reduce Operator Tree:
            Select Operator
              expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3199,10 +2989,8 @@ STAGE PLANS:
            Select Operator
              expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3269,7 +3057,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -3286,14 +3073,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key < 10) (type: boolean)
-             Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3338,14 +3122,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((key > 10) and (key < 20)) (type: boolean)
-             Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3365,65 +3146,51 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: key, value
-               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: key (type: string), value (type: string)
                  sort order: ++
                  Map-reduce partition columns: key (type: string), value (type: string)
-                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
      Reducer 5
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
                predicate: (KEY._col0 < 10) (type: boolean)
-               Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col0 (type: string)
-                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
              Filter Operator
                predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col1 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col1 (type: string)
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col0 (type: string)
      Reducer 6
          Reduce Operator Tree:
            Select Operator
              expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3433,10 +3200,8 @@ STAGE PLANS:
            Select Operator
              expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3549,7 +3314,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -3566,14 +3330,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key < 10) (type: boolean)
-             Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3618,14 +3379,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((key > 10) and (key < 20)) (type: boolean)
-             Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3645,65 +3403,51 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: key, value
-               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: key (type: string), value (type: string)
                  sort order: ++
                  Map-reduce partition columns: key (type: string), value (type: string)
-                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
      Reducer 5
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
                predicate: (KEY._col0 < 10) (type: boolean)
-               Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col0 (type: string)
-                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
              Filter Operator
                predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col1 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col1 (type: string)
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col0 (type: string)
      Reducer 6
          Reduce Operator Tree:
            Select Operator
              expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3713,10 +3457,8 @@ STAGE PLANS:
            Select Operator
              expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3829,7 +3571,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -3846,14 +3587,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key < 10) (type: boolean)
-             Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3898,14 +3636,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((key > 10) and (key < 20)) (type: boolean)
-             Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3925,65 +3660,51 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: key, value
-               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: key (type: string), value (type: string)
                  sort order: ++
                  Map-reduce partition columns: key (type: string), value (type: string)
-                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
      Reducer 5
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
                predicate: (KEY._col0 < 10) (type: boolean)
-               Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col0 (type: string)
-                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
              Filter Operator
                predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col1 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col1 (type: string)
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col0 (type: string)
      Reducer 6
          Reduce Operator Tree:
            Select Operator
              expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3993,10 +3714,8 @@ STAGE PLANS:
            Select Operator
              expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -4109,7 +3828,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -4126,14 +3844,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: (key < 10) (type: boolean)
-             Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -4178,14 +3893,11 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((key > 10) and (key < 20)) (type: boolean)
-             Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -4205,65 +3917,51 @@ STAGE PLANS:
          TableScan
            Filter Operator
              predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string), value (type: string)
                outputColumnNames: key, value
-               Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                Reduce Output Operator
                  key expressions: key (type: string), value (type: string)
                  sort order: ++
                  Map-reduce partition columns: key (type: string), value (type: string)
-                 Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
      Reducer 5
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
                predicate: (KEY._col0 < 10) (type: boolean)
-               Statistics: Num rows: 73 Data size: 775 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col0 (type: string)
-                     Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
              Filter Operator
                predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
-               Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col1 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col1 (type: string)
-                     Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col0 (type: string)
      Reducer 6
          Reduce Operator Tree:
            Select Operator
              expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -4273,10 +3971,8 @@ STAGE PLANS:
            Select Operator
              expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
              File Output Operator
                compressed: false
-               Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out b/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
index a0e5dde..dbb78a6 100644
--- a/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
+++ b/ql/src/test/results/clientpositive/spark/multigroupby_singlemr.q.out
@@ -67,7 +67,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: tbl
-           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -87,18 +86,15 @@ STAGE PLANS:
            Select Operator
              expressions: c1 (type: int), c2 (type: int)
              outputColumnNames: c1, c2
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Group By Operator
                aggregations: count(c2)
                keys: c1 (type: int)
                mode: hash
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: int)
                  sort order: +
                  Map-reduce partition columns: _col0 (type: int)
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  value expressions: _col1 (type: bigint)
      Reducer 4
          Reduce Operator Tree:
@@ -107,14 +103,11 @@ STAGE PLANS:
              keys: KEY._col0 (type: int)
              mode: mergepartial
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: _col0 (type: int), UDFToInteger(_col1) (type: int)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -162,18 +155,15 @@ STAGE PLANS:
            Select Operator
              expressions: c1 (type: int), c2 (type: int), c3 (type: int)
              outputColumnNames: c1, c2, c3
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Group By Operator
                aggregations: count(c3)
                keys: c1 (type: int), c2 (type: int)
                mode: hash
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: int), _col1 (type: int)
                  sort order: ++
                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  value expressions: _col2 (type: bigint)
      Reducer 5
          Reduce Operator Tree:
@@ -182,14 +172,11 @@ STAGE PLANS:
              keys: KEY._col0 (type: int), KEY._col1 (type: int)
              mode: mergepartial
              outputColumnNames: _col0, _col1, _col2
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: _col0 (type: int), _col1 (type: int), UDFToInteger(_col2) (type: int)
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -225,7 +212,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: tbl
-           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -245,18 +231,15 @@ STAGE PLANS:
            Select Operator
              expressions: c1 (type: int), c2 (type: int)
              outputColumnNames: c1, c2
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Group By Operator
                aggregations: count(c2)
                keys: c1 (type: int)
                mode: hash
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: int)
                  sort order: +
                  Map-reduce partition columns: _col0 (type: int)
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  value expressions: _col1 (type: bigint)
      Reducer 4
          Reduce Operator Tree:
@@ -265,14 +248,11 @@ STAGE PLANS:
              keys: KEY._col0 (type: int)
              mode: mergepartial
              outputColumnNames: _col0, _col1
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: _col0 (type: int), UDFToInteger(_col1) (type: int)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -320,18 +300,15 @@ STAGE PLANS:
            Select Operator
              expressions: c2 (type: int), c1 (type: int), c3 (type: int)
              outputColumnNames: c2, c1, c3
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Group By Operator
                aggregations: count(c3)
                keys: c2 (type: int), c1 (type: int)
                mode: hash
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: int), _col1 (type: int)
                  sort order: ++
                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  value expressions: _col2 (type: bigint)
      Reducer 5
          Reduce Operator Tree:
@@ -340,14 +317,11 @@ STAGE PLANS:
              keys: KEY._col0 (type: int), KEY._col1 (type: int)
              mode: mergepartial
              outputColumnNames: _col0, _col1, _col2
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: _col1 (type: int), _col0 (type: int), UDFToInteger(_col2) (type: int)
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -383,7 +357,6 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: tbl
-           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            File Output Operator
              compressed: false
              table:
@@ -403,18 +376,15 @@ STAGE PLANS:
            Select Operator
              expressions: c1 (type: int), c2 (type: int), c3 (type: int), c4 (type: int)
              outputColumnNames: c1, c2, c3, c4
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Group By Operator
                aggregations: count(c4)
                keys: c1 (type: int), c2 (type: int), c3 (type: int)
                mode: hash
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int)
                  sort order: +++
                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: int)
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  value expressions: _col3 (type: bigint)
      Reducer 4
          Reduce Operator Tree:
@@ -423,14 +393,11 @@ STAGE PLANS:
              keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int)
              mode: mergepartial
              outputColumnNames: _col0, _col1, _col2, _col3
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), UDFToInteger(_col3) (type: int)
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -478,18 +445,15 @@ STAGE PLANS:
            Select Operator
              expressions: c1 (type: int), c2 (type: int), c3 (type: int)
              outputColumnNames: c1, c2, c3
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Group By Operator
                aggregations: count(c3)
                keys: c1 (type: int), c2 (type: int)
                mode: hash
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Reduce Output Operator
                  key expressions: _col0 (type: int), _col1 (type: int)
                  sort order: ++
                  Map-reduce partition columns: _col0 (type: int), _col1 (type: int)
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  value expressions: _col2 (type: bigint)
      Reducer 5
          Reduce Operator Tree:
@@ -498,14 +462,11 @@ STAGE PLANS:
              keys: KEY._col0 (type: int), KEY._col1 (type: int)
              mode: mergepartial
              outputColumnNames: _col0, _col1, _col2
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: _col0 (type: int), _col1 (type: int), UDFToInteger(_col2) (type: int)
                outputColumnNames: _col0, _col1, _col2
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -541,34 +502,27 @@ STAGE PLANS:
      Map Operator Tree:
          TableScan
            alias: tbl
-           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Select Operator
              expressions: c1 (type: int), c2 (type: int), c3 (type: int), c4 (type: int)
              outputColumnNames: c1, c2, c3, c4
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Reduce Output Operator
                key expressions: c1 (type: int), c2 (type: int), c3 (type: int)
                sort order: +++
                Map-reduce partition columns: c1 (type: int), c2 (type: int), c3 (type: int)
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                value expressions: c4 (type: int)
      Reducer 2
          Reduce Operator Tree:
            Forward
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Group By Operator
                aggregations: count(VALUE._col0)
                keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int)
                mode: complete
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), UDFToInteger(_col3) (type: int)
                  outputColumnNames: _col0, _col1, _col2, _col3
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -579,14 +533,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: int), KEY._col2 (type: int), KEY._col1 (type: int)
                mode: complete
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: int), _col2 (type: int), _col1 (type: int), UDFToInteger(_col3) (type: int)
                  outputColumnNames: _col0, _col1, _col2, _col3
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 0 Data size: 0 Basic
stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -656,7 +607,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tbl - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -676,18 +626,15 @@ STAGE PLANS: Select Operator expressions: c1 (type: int), c2 (type: int), c3 (type: int), c4 (type: int) outputColumnNames: c1, c2, c3, c4 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(c4) keys: c1 (type: int), c2 (type: int), c3 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int) sort order: +++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: int) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reducer 5 Reduce Operator Tree: @@ -696,14 +643,11 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), UDFToInteger(_col3) (type: int) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -764,18 +708,15 @@ STAGE PLANS: Select Operator expressions: c1 (type: int), c2 (type: int), c3 (type: int) outputColumnNames: c1, c2, c3 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(c3) keys: c1 (type: int), c2 (type: int) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col2 (type: bigint) Reducer 6 Reduce Operator Tree: @@ -784,14 +725,11 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int), UDFToInteger(_col2) (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -810,18 +748,15 @@ STAGE PLANS: Select Operator expressions: c1 (type: int), c2 (type: int) outputColumnNames: c1, c2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator 
                      aggregations: count(c2)
                      keys: c1 (type: int)
                      mode: hash
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: int)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: int)
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        value expressions: _col1 (type: bigint)
        Reducer 7
            Reduce Operator Tree:
@@ -830,14 +765,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: int)
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: int), UDFToInteger(_col1) (type: int)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out b/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
index 0254170..d4db8a4 100644
--- a/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out
@@ -37,16 +37,13 @@ STAGE PLANS:
        Processor Tree:
          TableScan
            alias: src
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            GatherStats: false
            Filter Operator
              isSamplingPred: false
              predicate: false (type: boolean)
-             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
                expressions: key (type: string)
                outputColumnNames: _col0
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                ListSink
 
 PREHOOK: query: select key from src where false
@@ -58,6 +55,96 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 PREHOOK: query: explain extended
+select count(key) from srcpart where 1=2 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select count(key) from srcpart where 1=2 group by key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            srcpart
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_FUNCTION
+               count
+               TOK_TABLE_OR_COL
+                  key
+      TOK_WHERE
+         =
+            1
+            2
+      TOK_GROUPBY
+         TOK_TABLE_OR_COL
+            key
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Spark
+      Edges:
+        Reducer 2 <- Map 1 (GROUP)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+        Reducer 2
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Select Operator
+                  expressions: _col1 (type: bigint)
+                  outputColumnNames: _col0
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0
+                          columns.types bigint
+                          escape.delim \
+                          hive.serialization.extend.nesting.levels true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(key) from srcpart where 1=2 group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from srcpart where 1=2 group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+#### A masked pattern was here ####
 PREHOOK: query: explain extended
 select * from (select key from src where false) a left outer join (select key from srcpart limit 0) b on a.key=b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
@@ -136,21 +223,17 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: false
                    predicate: false (type: boolean)
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: _col0
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        tag: 0
                        auto parallelism: true
            Path -> Alias:
@@ -206,18 +289,14 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: srcpart
-                 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                    Limit
                      Number of rows: 0
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Reduce Output Operator
                        sort order:
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        tag: -1
                        value expressions: _col0 (type: string)
                        auto parallelism: false
@@ -426,17 +505,14 @@ STAGE PLANS:
                  0 {KEY.reducesinkkey0}
                  1 {KEY.reducesinkkey0}
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
                    GlobalTableId: 0
#### A masked pattern was here ####
                    NumFilesPerFileSink: 1
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
@@ -458,15 +534,12 @@ STAGE PLANS:
                Select Operator
                  expressions: VALUE._col0 (type: string)
                  outputColumnNames: _col0
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  Limit
                    Number of rows: 0
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
                      Map-reduce partition columns: _col0 (type: string)
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      tag: 1
                      auto parallelism: true
@@ -565,20 +638,16 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: srcpart
-                 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: key (type: string)
                    outputColumnNames: key
-                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                    Group By Operator
                      aggregations: count(key)
                      mode: hash
                      outputColumnNames: _col0
-                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        sort order:
-                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                        tag: -1
                        value expressions: _col0 (type: bigint)
                        auto parallelism: false
@@ -778,24 +847,19 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: false
                    predicate: false (type: boolean)
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: key
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Group By Operator
                        aggregations: count(key)
                        mode: hash
                        outputColumnNames: _col0
-                       Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          sort order:
-                         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                          tag: -1
                          value expressions: _col0 (type: bigint)
                          auto parallelism: false
@@ -941,8 +1005,8 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
-0 2000
+0
 PREHOOK: query: explain extended
 select * from (select key from src where false) a left outer join (select value from srcpart limit 0) b
 PREHOOK: type: QUERY
@@ -1013,19 +1077,15 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: false
                    predicate: false (type: boolean)
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    Select Operator
                      expressions: key (type: string)
                      outputColumnNames: _col0
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Reduce Output Operator
                        sort order:
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        tag: 0
                        value expressions: _col0 (type: string)
                        auto parallelism: false
@@ -1082,18 +1142,14 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: srcpart
-                 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Select Operator
                    expressions: value (type: string)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                    Limit
                      Number of rows: 0
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Reduce Output Operator
                        sort order:
-                       Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                        tag: -1
                        value expressions: _col0 (type: string)
                        auto parallelism: false
@@ -1302,17 +1358,14 @@ STAGE PLANS:
                  0 {VALUE._col0}
                  1 {VALUE._col0}
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  File Output Operator
                    compressed: false
                    GlobalTableId: 0
#### A masked pattern was here ####
                    NumFilesPerFileSink: 1
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1334,13 +1387,10 @@ STAGE PLANS:
                Select Operator
                  expressions: VALUE._col0 (type: string)
                  outputColumnNames: _col0
-                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  Limit
                    Number of rows: 0
-                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    Reduce Output Operator
                      sort order:
-                     Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      tag: 1
                      value expressions: _col0 (type: string)
                      auto parallelism: false
@@ -1450,13 +1500,11 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: srcpart
-                 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Reduce Output Operator
                    key expressions: key (type: string)
                    sort order: +
                    Map-reduce partition columns: key (type: string)
-                   Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                    tag: 1
                    auto parallelism: true
            Path -> Alias:
@@ -1738,13 +1786,11 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Reduce Output Operator
                    key expressions: key (type: string)
                    sort order: +
                    Map-reduce partition columns: key (type: string)
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    tag: 0
                    auto parallelism: true
            Path -> Alias:
diff --git a/ql/src/test/results/clientpositive/spark/order.q.out b/ql/src/test/results/clientpositive/spark/order.q.out
index 7d2c54c..c337afa 100644
--- a/ql/src/test/results/clientpositive/spark/order.q.out
+++ b/ql/src/test/results/clientpositive/spark/order.q.out
@@ -19,28 +19,22 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: x
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: key (type: string), value (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
        Reducer 2
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                Limit
                  Number of rows: 10
-                 Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -91,28 +85,22 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: x
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: key (type: string), value (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: -
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
        Reducer 2
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                Limit
                  Number of rows: 10
-                 Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/order2.q.out b/ql/src/test/results/clientpositive/spark/order2.q.out
index 94c7b4f..bb2e765 100644
--- a/ql/src/test/results/clientpositive/spark/order2.q.out
+++ b/ql/src/test/results/clientpositive/spark/order2.q.out
@@ -23,35 +23,27 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: x
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: key (type: string), value (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: _col0 (type: string)
                      sort order: +
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: string)
        Reducer 2
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                Limit
                  Number of rows: 10
-                 Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (_col0 < 10) (type: boolean)
-                   Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: _col0 (type: string), _col1 (type: string)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
-                       Statistics: Num rows: 3 Data size: 30 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/parallel.q.out b/ql/src/test/results/clientpositive/spark/parallel.q.out
index dc5e788..acd418f 100644
--- a/ql/src/test/results/clientpositive/spark/parallel.q.out
+++ b/ql/src/test/results/clientpositive/spark/parallel.q.out
@@ -48,53 +48,42 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: key (type: string), value (type: string)
                    outputColumnNames: key, value
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Group By Operator
                      keys: key (type: string), value (type: string)
                      mode: hash
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string), _col1 (type: string)
                        sort order: ++
                        Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
        Reducer 2
            Reduce Operator Tree:
              Group By Operator
                keys: KEY._col0 (type: string), KEY._col1 (type: string)
                mode: mergepartial
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col0 (type: string), _col1 (type: string)
                    sort order: ++
                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
        Reducer 3
            Reduce Operator Tree:
              Forward
-               Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -104,14 +93,11 @@ STAGE PLANS:
                  keys: KEY._col0 (type: string), KEY._col1 (type: string)
                  mode: complete
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -163,7 +149,6 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 POSTHOOK: Output: default@src_a
 POSTHOOK: Output: default@src_b
-POSTHOOK: Lineage: src_a.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: src_a.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
 POSTHOOK: Lineage: src_b.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: src_b.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
diff --git a/ql/src/test/results/clientpositive/spark/parallel_join0.q.out b/ql/src/test/results/clientpositive/spark/parallel_join0.q.out
index 3149825..2a35055 100644
--- a/ql/src/test/results/clientpositive/spark/parallel_join0.q.out
+++ b/ql/src/test/results/clientpositive/spark/parallel_join0.q.out
@@ -34,33 +34,25 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (key < 10) (type: boolean)
-                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        sort order:
-                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                        value expressions: _col0 (type: string), _col1 (type: string)
        Map 4
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (key < 10) (type: boolean)
-                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: string), value (type: string)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        sort order:
-                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                        value expressions: _col0 (type: string), _col1 (type: string)
        Reducer 2
            Reduce Operator Tree:
@@ -71,24 +63,19 @@ STAGE PLANS:
                  0 {VALUE._col0} {VALUE._col1}
                  1 {VALUE._col0} {VALUE._col1}
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
                  outputColumnNames: _col0, _col1, _col2, _col3
-                 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                  Reduce Output Operator
                    key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
                    sort order: ++++
-                   Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
        Reducer 3
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string)
                outputColumnNames: _col0, _col1, _col2, _col3
-               Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -135,47 +122,6 @@ POSTHOOK: query: SELECT src1.key as k1, src1.value as v1,
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
-5 val_5 9 val_9
-5 val_5 9 val_9
-5 val_5 9 val_9
-8 val_8 0 val_0
-8 val_8 0 val_0
-8 val_8 0 val_0
-8 val_8 2 val_2
-8 val_8 4 val_4
-8 val_8 5 val_5
-8 val_8 5 val_5
-8 val_8 5 val_5
-8 val_8 8 val_8
-8 val_8 9 val_9
-9 val_9 0 val_0
-9 val_9 0 val_0
-9 val_9 0 val_0
-9 val_9 2 val_2
-9 val_9 4 val_4
-9 val_9 5 val_5
-9 val_9 5 val_5
-9 val_9 5 val_5
-9 val_9 8 val_8
-9 val_9 9 val_9
-5 val_5 2 val_2
-5 val_5 2 val_2
-5 val_5 2 val_2
-5 val_5 4 val_4
-5 val_5 4 val_4
-5 val_5 4 val_4
-5 val_5 5 val_5
-5 val_5 5 val_5
-5 val_5 5 val_5
-5 val_5 5 val_5
-5 val_5 5 val_5
-5 val_5 5 val_5
-5 val_5 5 val_5
-5 val_5 5 val_5
-5 val_5 5 val_5
-5 val_5 8 val_8
-5 val_5 8 val_8
-5 val_5 8 val_8
 0 val_0 0 val_0
 0 val_0 0 val_0
 0 val_0 0 val_0
@@ -235,3 +181,44 @@ POSTHOOK: Input: default@src
 5 val_5 0 val_0
 5 val_5 0 val_0
 5 val_5 0 val_0
+5 val_5 2 val_2
+5 val_5 2 val_2
+5 val_5 2 val_2
+5 val_5 4 val_4
+5 val_5 4 val_4
+5 val_5 4 val_4
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 5 val_5
+5 val_5 8 val_8
+5 val_5 8 val_8
+5 val_5 8 val_8
+5 val_5 9 val_9
+5 val_5 9 val_9
+5 val_5 9 val_9
+8 val_8 0 val_0
+8 val_8 0 val_0
+8 val_8 0 val_0
+8 val_8 2 val_2
+8 val_8 4 val_4
+8 val_8 5 val_5
+8 val_8 5 val_5
+8 val_8 5 val_5
+8 val_8 8 val_8
+8 val_8 9 val_9
+9 val_9 0 val_0
+9 val_9 0 val_0
+9 val_9 0 val_0
+9 val_9 2 val_2
+9 val_9 4 val_4
+9 val_9 5 val_5
+9 val_9 5 val_5
+9 val_9 5 val_5
+9 val_9 8 val_8
+9 val_9 9 val_9
diff --git a/ql/src/test/results/clientpositive/spark/parallel_join1.q.out b/ql/src/test/results/clientpositive/spark/parallel_join1.q.out
index 7cd0af6..c772c88 100644
--- a/ql/src/test/results/clientpositive/spark/parallel_join1.q.out
+++ b/ql/src/test/results/clientpositive/spark/parallel_join1.q.out
@@ -35,29 +35,23 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src2
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string)
                      sort order: +
                      Map-reduce partition columns: key (type: string)
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      value expressions: value (type: string)
        Map 3
            Map Operator Tree:
                TableScan
                  alias: src1
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string)
                      sort order: +
                      Map-reduce partition columns: key (type: string)
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
        Reducer 2
            Reduce Operator Tree:
              Join Operator
@@ -67,14 +61,11 @@ STAGE PLANS:
                  0 {KEY.reducesinkkey0}
                  1 {VALUE._col0}
                outputColumnNames: _col0, _col6
-               Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: UDFToInteger(_col0) (type: int), _col6 (type: string)
                  outputColumnNames: _col0, _col1
-                 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
-                   Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -118,6 +109,226 @@ POSTHOOK: query: SELECT dest_j1.* FROM dest_j1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@dest_j1
 #### A masked pattern was here ####
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+0 val_0
+103 val_103
+103 val_103
+103 val_103
+103 val_103
+11 val_11
+114 val_114
+118 val_118
+118 val_118
+118 val_118
+118 val_118
+125 val_125
+125 val_125
+125 val_125
+125 val_125
+129 val_129
+129 val_129
+129 val_129
+129 val_129
+136 val_136
+143 val_143
+15 val_15
+15 val_15
+15 val_15
+15 val_15
+150 val_150
+158 val_158
+165 val_165
+165 val_165
+165 val_165
+165 val_165
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+169 val_169
+172 val_172
+172 val_172
+172 val_172
+172 val_172
+176 val_176
+176 val_176
+176 val_176
+176 val_176
+183 val_183
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+187 val_187
+19 val_19
+190 val_190
+194 val_194
+202 val_202
+213 val_213
+213 val_213
+213 val_213
+213 val_213
+217 val_217
+217 val_217
+217 val_217
+217 val_217
+224 val_224
+224 val_224
+224 val_224
+224 val_224
+228 val_228
+235 val_235
+239 val_239
+239 val_239
+239 val_239
+239 val_239
+242 val_242
+242 val_242
+242 val_242
+242 val_242
+257 val_257
+26 val_26
+26 val_26
+26 val_26
+26 val_26
+260 val_260
+275 val_275
+282 val_282
+282 val_282
+282 val_282
+282 val_282
+286 val_286
+305 val_305
+309 val_309
+309 val_309
+309 val_309
+309 val_309
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+316 val_316
+323 val_323
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+327 val_327
+33 val_33
+338 val_338
+341 val_341
+345 val_345
+356 val_356
+367 val_367
+367 val_367
+367 val_367
+367 val_367
+37 val_37
+37 val_37
+37 val_37
+37 val_37
+374 val_374
+378 val_378
+389 val_389
+392 val_392
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+396 val_396
+4 val_4
+400 val_400
+404 val_404
+404 val_404
+404 val_404
+404 val_404
+411 val_411
+419 val_419
+437 val_437
+44 val_44
+444 val_444
+448 val_448
+455 val_455
+459 val_459
+459 val_459
+459 val_459
+459 val_459
+462 val_462
+462 val_462
+462 val_462
+462 val_462
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+466 val_466
+477 val_477
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+480 val_480
+484 val_484
+491 val_491
+495 val_495
+51 val_51
+51 val_51
+51 val_51
+51 val_51
+66 val_66
+77 val_77
+8 val_8
+80 val_80
+84 val_84
+84 val_84
+84 val_84
+84 val_84
+95 val_95
+95 val_95
+95 val_95
+95 val_95
 10 val_10
 113 val_113
 113 val_113
@@ -422,250 +633,6 @@ POSTHOOK: Input: default@dest_j1
 98 val_98
 98 val_98
 98 val_98
-100 val_100
-100 val_100
-100 val_100
-100 val_100
-104 val_104
-104 val_104
-104 val_104
-104 val_104
-111 val_111
-119 val_119
-119 val_119
-119 val_119
-119 val_119
-119 val_119
-119 val_119
-119 val_119
-119 val_119
-119 val_119
-12 val_12
-12 val_12
-12 val_12
-12 val_12
-126 val_126
-133 val_133
-137 val_137
-137 val_137
-137 val_137
-137 val_137
-155 val_155
-162 val_162
-166 val_166
-177 val_177
-180 val_180
-191 val_191
-191 val_191
-191 val_191
-191 val_191
-195 val_195
-195 val_195
-195 val_195
-195 val_195
-199 val_199
-199 val_199
-199 val_199
-199 val_199
-199 val_199
-199 val_199
-199 val_199
-199 val_199
-199 val_199
-203 val_203
-203 val_203
-203 val_203
-203 val_203
-207 val_207
-207 val_207
-207 val_207
-207 val_207
-214 val_214
-218 val_218
-221 val_221
-221 val_221
-221 val_221
-221 val_221
-229 val_229
-229 val_229
-229 val_229
-229 val_229
-247 val_247
-258 val_258
-265 val_265
-265 val_265
-265 val_265
-265 val_265
-27 val_27
-272 val_272
-272 val_272
-272 val_272
-272 val_272
-283 val_283
-287 val_287
-298 val_298
-298 val_298
-298 val_298
-298 val_298
-298 val_298
-298 val_298
-298 val_298
-298 val_298
-298 val_298
-30 val_30
-302 val_302
-306 val_306
-317 val_317
-317 val_317
-317 val_317
-317 val_317
-331 val_331
-331 val_331
-331 val_331
-331 val_331
-335 val_335
-339 val_339
-34 val_34
-342 val_342
-342 val_342
-342 val_342
-342 val_342
-353 val_353
-353 val_353
-353 val_353
-353 val_353
-360 val_360
-364 val_364
-368 val_368
-375 val_375
-379 val_379
-382 val_382
-382 val_382
-382 val_382
-382 val_382
-386 val_386
-393 val_393
-397 val_397
-397 val_397
-397 val_397
-397 val_397
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-401 val_401
-409 val_409
-409 val_409
-409 val_409
-409 val_409
-409 val_409
-409 val_409
-409 val_409
-409 val_409
-409 val_409
-41 val_41
-427 val_427
-430 val_430
-430 val_430
-430 val_430
-430 val_430
-430 val_430
-430 val_430
-430 val_430
-430 val_430
-430 val_430
-438 val_438
-438 val_438
-438 val_438
-438 val_438
-438 val_438
-438 val_438
-438 val_438
-438 val_438
-438 val_438
-449 val_449
-452 val_452
-463 val_463
-463 val_463
-463 val_463
-463 val_463
-467 val_467
-470 val_470
-478 val_478
-478 val_478
-478 val_478
-478 val_478
-481 val_481
-485 val_485
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-489 val_489
-492 val_492
-492 val_492
-492 val_492
-492 val_492
-496 val_496
-5 val_5
-5 val_5
-5 val_5
-5 val_5
-5 val_5
-5 val_5
-5 val_5
-5 val_5
-5 val_5
-67 val_67
-67 val_67
-67 val_67
-67 val_67
-70 val_70
-70 val_70
-70 val_70
-70 val_70
-70 val_70
-70 val_70
-70 val_70
-70 val_70
-70 val_70
-74 val_74
-78 val_78
-85 val_85
-9 val_9
-92 val_92
-96 val_96
 105 val_105
 116 val_116
 134 val_134
@@ -926,223 +893,247 @@ POSTHOOK: Input: default@dest_j1
 97 val_97
 97 val_97
 97 val_97
-0 val_0
-0 val_0
-0 val_0
-0 val_0
-0 val_0
-0 val_0
-0 val_0
-0 val_0
-0 val_0
-103 val_103
-103 val_103
-103 val_103
-103 val_103
-11 val_11
-114 val_114
-118 val_118
-118 val_118
-118 val_118
-118 val_118
-125 val_125
-125 val_125
-125 val_125
-125 val_125
-129 val_129
-129 val_129
-129 val_129
-129 val_129
-136 val_136
-143 val_143
-15 val_15
-15 val_15
-15 val_15
-15 val_15
-150 val_150
-158 val_158
-165 val_165
-165 val_165
-165 val_165
-165 val_165
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-169 val_169
-172 val_172
-172 val_172
-172 val_172
-172 val_172
-176 val_176
-176 val_176
-176 val_176
-176 val_176
-183 val_183
-187 val_187
-187 val_187
-187 val_187
-187 val_187
-187 val_187
-187 val_187
-187 val_187
-187 val_187
-187 val_187
-19 val_19
-190 val_190
-194 val_194
-202 val_202
-213 val_213
-213 val_213
-213 val_213
-213 val_213
-217 val_217
-217 val_217
-217 val_217
-217 val_217
-224 val_224
-224 val_224
-224 val_224
-224 val_224
-228 val_228
-235 val_235
-239 val_239
-239 val_239
-239 val_239
-239 val_239
-242 val_242
-242 val_242
-242 val_242
-242 val_242
-257 val_257
-26 val_26
-26 val_26
-26 val_26
-26 val_26
-260 val_260
-275 val_275
-282 val_282
-282 val_282
-282 val_282
-282 val_282
-286 val_286
-305 val_305
-309 val_309
-309 val_309
-309 val_309
-309 val_309
-316 val_316
-316 val_316
-316 val_316
-316 val_316
-316 val_316
-316 val_316
-316 val_316
-316 val_316
-316 val_316
-323 val_323
-327 val_327
-327 val_327
-327 val_327
-327 val_327
-327 val_327
-327 val_327
-327 val_327
-327 val_327
-327 val_327
-33 val_33
-338 val_338
-341 val_341
-345 val_345
-356 val_356
-367 val_367
-367 val_367
-367 val_367
-367 val_367
-37 val_37
-37 val_37
-37 val_37
-37 val_37
-374 val_374
-378 val_378
-389 val_389
-392 val_392
-396 val_396
-396 val_396
-396 val_396
-396 val_396
-396 val_396
-396 val_396
-396 val_396
-396 val_396
-396 val_396
-4 val_4
-400 val_400
-404 val_404
-404 val_404
-404 val_404
-404 val_404
-411 val_411
-419 val_419
-437 val_437
-44 val_44
-444 val_444
-448 val_448
-455 val_455
-459 val_459
-459 val_459
-459 val_459
-459 val_459
-462 val_462
-462 val_462
-462 val_462
-462 val_462
-466 val_466
-466 val_466
-466 val_466
-466 val_466
-466 val_466
-466 val_466
-466 val_466
-466 val_466
-466 val_466
-477 val_477
-480 val_480
-480 val_480
-480 val_480
-480 val_480
-480 val_480
-480 val_480
-480 val_480
-480 val_480
-480 val_480
-484 val_484
-491 val_491
-495 val_495
-51 val_51
-51 val_51
-51 val_51
-51 val_51
-66 val_66
-77 val_77
-8 val_8
-80 val_80
-84 val_84
-84 val_84
-84 val_84
-84 val_84
-95 val_95
-95 val_95
-95 val_95
-95 val_95
+100 val_100
+100 val_100
+100 val_100
+100 val_100
+104 val_104
+104 val_104
+104 val_104
+104 val_104
+111 val_111
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+119 val_119
+12 val_12
+12 val_12
+12 val_12
+12 val_12
+126 val_126
+133 val_133
+137 val_137
+137 val_137
+137 val_137
+137 val_137
+155 val_155
+162 val_162
+166 val_166
+177 val_177
+180 val_180
+191 val_191
+191 val_191
+191 val_191
+191 val_191
+195 val_195
+195 val_195
+195 val_195
+195 val_195
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+199 val_199
+203 val_203
+203 val_203
+203 val_203
+203 val_203
+207 val_207
+207 val_207
+207 val_207
+207 val_207
+214 val_214
+218 val_218
+221 val_221
+221 val_221
+221 val_221
+221 val_221
+229 val_229
+229 val_229
+229 val_229
+229 val_229
+247 val_247
+258 val_258
+265 val_265
+265 val_265
+265 val_265
+265 val_265
+27 val_27
+272 val_272
+272 val_272
+272 val_272
+272 val_272
+283 val_283
+287 val_287
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+298 val_298
+30 val_30
+302 val_302
+306 val_306
+317 val_317
+317 val_317
+317 val_317
+317 val_317
+331 val_331
+331 val_331
+331 val_331
+331 val_331
+335 val_335
+339 val_339
+34 val_34
+342 val_342
+342 val_342
+342 val_342
+342 val_342
+353 val_353
+353 val_353
+353 val_353
+353 val_353
+360 val_360
+364 val_364
+368 val_368
+375 val_375
+379 val_379
+382 val_382
+382 val_382
+382 val_382
+382 val_382
+386 val_386
+393 val_393
+397 val_397
+397 val_397
+397 val_397
+397 val_397
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+401 val_401
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+409 val_409
+41 val_41
+427 val_427
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+430 val_430
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+438 val_438
+449 val_449
+452 val_452
+463 val_463
+463 val_463
+463 val_463
+463 val_463
+467 val_467
+470 val_470
+478 val_478
+478 val_478
+478 val_478
+478 val_478
+481 val_481
+485 val_485
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+489 val_489
+492 val_492
+492 val_492
+492 val_492
+492 val_492
+496 val_496
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+5 val_5
+67 val_67
+67 val_67
+67 val_67
+67 val_67
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+70 val_70
+74 val_74
+78 val_78
+85 val_85
+9 val_9
+92 val_92
+96 val_96
diff --git a/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out b/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
index a118823..169d2f1 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out
@@ -62,28 +62,22 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: b
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string)
                      sort order: +
                      Map-reduce partition columns: key (type: string)
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
        Map 7
            Map Operator Tree:
                TableScan
                  alias: a
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string)
                      sort order: +
                      Map-reduce partition columns: key (type: string)
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      value expressions: value (type: string)
        Reducer 6
            Reduce Operator Tree:
@@ -94,7 +88,6 @@ STAGE PLANS:
                  0 {KEY.reducesinkkey0} {VALUE._col0}
                  1
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
                  table:
@@ -111,14 +104,11 @@ STAGE PLANS:
              TableScan
                Filter Operator
                  predicate: (_col0 < 100) (type: boolean)
-                 Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -179,14 +169,11 @@ STAGE PLANS:
              TableScan
                Filter Operator
                  predicate: ((_col0 >= 100) and (_col0 < 200)) (type: boolean)
-                 Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -202,14 +189,11 @@ STAGE PLANS:
              TableScan
                Filter Operator
                  predicate: ((_col0 >= 200) and (_col0 < 300)) (type: boolean)
-                 Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: UDFToInteger(_col0) (type: int)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -225,14 +209,11 @@ STAGE PLANS:
              TableScan
                Filter Operator
                  predicate: (_col0 >= 300) (type: boolean)
-                 Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col1 (type: string)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1368,28 +1349,22 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: b
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string)
                      sort order: +
                      Map-reduce partition columns: key (type: string)
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
        Map 7
            Map Operator Tree:
                TableScan
                  alias: a
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: key is not null (type: boolean)
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string)
                      sort order: +
                      Map-reduce partition columns: key (type: string)
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      value expressions: value (type: string)
        Reducer 6
            Reduce Operator Tree:
@@ -1400,7 +1375,6 @@ STAGE PLANS:
                  0 {KEY.reducesinkkey0} {VALUE._col0}
                  1
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
                  table:
@@ -1417,14 +1391,11 @@ STAGE PLANS:
              TableScan
                Filter Operator
                  predicate: (_col0 < 100) (type: boolean)
-                 Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1485,14 +1456,11 @@ STAGE PLANS:
              TableScan
                Filter Operator
                  predicate: ((_col0 >= 100) and (_col0 < 200)) (type: boolean)
-                 Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1508,14 +1476,11 @@ STAGE PLANS:
              TableScan
                Filter Operator
                  predicate: ((_col0 >= 200) and (_col0 < 300)) (type: boolean)
-                 Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: UDFToInteger(_col0) (type: int)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 30 Data size: 318 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1531,14 +1496,11 @@ STAGE PLANS:
              TableScan
                Filter Operator
                  predicate: (_col0 >= 300) (type: boolean)
-                 Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col1 (type: string)
                    outputColumnNames: _col0
-                   Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 91 Data size: 966 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/ppd_transform.q.out b/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
index 78791ca..54b8a8a 100644
--- a/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
+++ b/ql/src/test/results/clientpositive/spark/ppd_transform.q.out
@@ -31,40 +31,32 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: key (type: string), value (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Transform Operator
                      command: cat
                      output info:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: string)
                        sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                       Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        value expressions: _col0 (type: string), _col1 (type: string)
        Reducer 2
            Reduce Operator Tree:
              Select Operator
                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
                  predicate: (_col0 < 100) (type: boolean)
-                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -213,36 +205,29 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: key (type: string), value (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Transform Operator
                      command: cat
                      output info:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      Filter Operator
                        predicate: (_col0 < 100) (type: boolean)
-                       Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: string)
                          sort order: +
                          Map-reduce partition columns: _col0 (type: string)
-                         Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                          value expressions: _col0 (type: string), _col1 (type: string)
        Reducer 2
            Reduce Operator Tree:
              Select Operator
                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
-                 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -391,18 +376,15 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: src
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: key (type: string), value (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Transform Operator
                      command: cat
                      output info:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
                        table:
@@ -419,14 +401,11 @@ STAGE PLANS:
              TableScan
                Filter Operator
                  predicate: ((_col0 = 'a') or (_col0 = 'b')) (type: boolean)
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -450,14 +429,11 @@ STAGE PLANS:
              TableScan
                Filter Operator
                  predicate: ((_col0 = 'c') or (_col0 = 'd')) (type: boolean)
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: _col0 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
-                   Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
-                     Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/spark/sample1.q.out b/ql/src/test/results/clientpositive/spark/sample1.q.out
index e6c5e62..534ef7a 100644
--- a/ql/src/test/results/clientpositive/spark/sample1.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample1.q.out
@@ -72,22 +72,18 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: s
-                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: true
                    predicate: (((hash(rand()) & 2147483647) % 1) = 0) (type: boolean)
-                   Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: UDFToInteger(key) (type: int), value (type: string), ds (type: string), hr (type: string)
                      outputColumnNames: _col0, _col1, _col2, _col3
-                     Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
                        GlobalTableId: 1
#### A masked pattern was here ####
                        NumFilesPerFileSink: 1
-                       Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
diff --git a/ql/src/test/results/clientpositive/spark/sample2.q.out b/ql/src/test/results/clientpositive/spark/sample2.q.out
index 2b4fcd6..51b3678 100644
--- a/ql/src/test/results/clientpositive/spark/sample2.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample2.q.out
@@ -56,22 +56,18 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: s
-                 Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: true
                    predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean)
-                   Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: int), value (type: string)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
                        GlobalTableId: 1
#### A masked pattern was here ####
                        NumFilesPerFileSink: 1
-                       Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
diff --git a/ql/src/test/results/clientpositive/spark/sample3.q.out b/ql/src/test/results/clientpositive/spark/sample3.q.out
index f15eaba..dff86a7 100644
--- a/ql/src/test/results/clientpositive/spark/sample3.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample3.q.out
@@ -18,14 +18,11 @@ STAGE PLANS:
        Processor Tree:
          TableScan
            alias: s
-           Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
              predicate: (((hash(key) & 2147483647) % 5) = 0) (type: boolean)
-             Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: int)
                outputColumnNames: _col0
-               Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                ListSink
 
 PREHOOK: query: SELECT s.key
diff --git a/ql/src/test/results/clientpositive/spark/sample4.q.out b/ql/src/test/results/clientpositive/spark/sample4.q.out
index 6b12cd6..416ab83 100644
--- a/ql/src/test/results/clientpositive/spark/sample4.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample4.q.out
@@ -58,22 +58,18 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: s
-                 Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: true
                    predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean)
-                   Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: int), value (type: string)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
                        GlobalTableId: 1
#### A masked pattern was here ####
                        NumFilesPerFileSink: 1
-                       Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
diff --git a/ql/src/test/results/clientpositive/spark/sample5.q.out b/ql/src/test/results/clientpositive/spark/sample5.q.out
index a605c92..e1ca519 100644
--- a/ql/src/test/results/clientpositive/spark/sample5.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample5.q.out
@@ -56,22 +56,18 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: s
-                 Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: true
                    predicate: (((hash(key) & 2147483647) % 5) = 0) (type: boolean)
-                   Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: int), value (type: string)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
                        GlobalTableId: 1
#### A masked pattern was here ####
                        NumFilesPerFileSink: 1
-                       Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
diff --git a/ql/src/test/results/clientpositive/spark/sample6.q.out b/ql/src/test/results/clientpositive/spark/sample6.q.out
index 60a85f4..6ab5e89 100644
--- a/ql/src/test/results/clientpositive/spark/sample6.q.out
+++ b/ql/src/test/results/clientpositive/spark/sample6.q.out
@@ -56,22 +56,18 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: s
-                 Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: true
                    predicate: (((hash(key) & 2147483647) % 4) = 0) (type: boolean)
-                   Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: int), value (type: string)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
                        GlobalTableId: 1
#### A masked pattern was here ####
                        NumFilesPerFileSink: 1
-                       Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
@@ -495,20 +491,16 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  alias: s
-                 Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE
                  GatherStats: false
                  Filter Operator
                    isSamplingPred: true
                    predicate: (((hash(key) & 2147483647) % 4) = 3) (type: boolean)
-                   Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: key (type: int), value (type: string)
                      outputColumnNames: _col0, _col1
-                     Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                      Reduce Output Operator
                        key expressions: _col0 (type: int), _col1 (type: string)
                        sort order: ++
-                       Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                        tag: -1
                        auto parallelism: false
            Path -> Alias:
@@ -568,13 +560,11 @@ STAGE PLANS:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string)
                outputColumnNames: _col0, _col1
-               Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
                  GlobalTableId: 0
#### A masked pattern was here ####
                  NumFilesPerFileSink: 1
-                 Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE
#### A masked pattern was here ####
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
@@ -898,20 +888,16 @@ STAGE PLANS:
            Map Operator Tree:
TableScan alias: s - Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: true predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE tag: -1 auto parallelism: false Path -> Alias: @@ -971,13 +957,11 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -1555,20 +1539,16 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s - Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: true predicate: (((hash(key) & 2147483647) % 3) = 0) (type: boolean) - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE tag: -1 auto parallelism: false Path -> Alias: @@ -1628,13 +1608,11 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -2055,20 +2033,16 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s - Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: true predicate: (((hash(key) & 2147483647) % 3) = 1) (type: boolean) - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE tag: -1 auto parallelism: false Path -> Alias: @@ -2128,13 +2102,11 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num 
rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -2541,20 +2513,16 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: true predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: -1 auto parallelism: false Path -> Alias: @@ -2661,13 +2629,11 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -2874,20 +2840,16 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: true predicate: (((hash(key) & 2147483647) % 4) = 1) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE tag: -1 auto parallelism: false Path -> Alias: @@ -2947,13 +2909,11 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -3087,13 +3047,11 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git 
a/ql/src/test/results/clientpositive/spark/sample7.q.out b/ql/src/test/results/clientpositive/spark/sample7.q.out index f99d898..0d08599 100644 --- a/ql/src/test/results/clientpositive/spark/sample7.q.out +++ b/ql/src/test/results/clientpositive/spark/sample7.q.out @@ -65,22 +65,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s - Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: false predicate: ((((hash(key) & 2147483647) % 4) = 0) and (key > 100)) (type: boolean) - Statistics: Num rows: 166 Data size: 1760 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 166 Data size: 1760 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 1 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 166 Data size: 1760 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git a/ql/src/test/results/clientpositive/spark/sample8.q.out b/ql/src/test/results/clientpositive/spark/sample8.q.out index d3b6da2..ca12808 100644 --- a/ql/src/test/results/clientpositive/spark/sample8.q.out +++ b/ql/src/test/results/clientpositive/spark/sample8.q.out @@ -99,17 +99,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: true predicate: ((((((hash(key) & 2147483647) % 1) = 0) and key is not null) and value is not null) and (((hash(key) & 2147483647) % 10) = 0)) (type: boolean) - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string), value (type: string) sort order: ++ Map-reduce partition columns: key (type: string), value (type: string) - Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE tag: 1 auto parallelism: true Path -> Alias: @@ -308,17 +305,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE GatherStats: false Filter Operator isSamplingPred: true predicate: ((((((hash(key) & 2147483647) % 10) = 0) and key is not null) and value is not null) and (((hash(key) & 2147483647) % 1) = 0)) (type: boolean) - Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string), value (type: string) sort order: ++ Map-reduce partition columns: key (type: string), value (type: string) - Statistics: Num rows: 31 Data size: 329 Basic stats: COMPLETE Column stats: NONE tag: 0 value expressions: ds (type: string), hr (type: string) auto parallelism: true @@ -383,21 +377,17 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} {VALUE._col0} {VALUE._col1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8 - Statistics: Num rows: 137 Data size: 1460 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: ((((_col7 = _col0) and (_col8 = _col1)) and (_col2 = '2008-04-08')) and (_col3 = '11')) (type: boolean) - Statistics: Num rows: 8 Data size: 85 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), 
'2008-04-08' (type: string), '11' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 8 Data size: 85 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 8 Data size: 85 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -816,30 +806,24 @@ STAGE PLANS: TableScan alias: b Row Limit Per Split: 10 - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: string) Map 3 Map Operator Tree: TableScan alias: a Row Limit Per Split: 100 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: string) Reducer 2 Reduce Operator Tree: @@ -850,14 +834,11 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} outputColumnNames: _col0, _col1, _col5, _col6 - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -907,30 +888,24 @@ STAGE PLANS: TableScan alias: b Row Limit Per Split: 10 - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: string) Map 3 Map Operator Tree: TableScan alias: a Row Limit Per Split: 100 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: string) Reducer 2 Reduce Operator Tree: @@ -941,17 +916,13 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 
{KEY.reducesinkkey0} {VALUE._col0} outputColumnNames: _col0, _col1, _col5, _col6 - Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col0 = _col5) (type: boolean) - Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/sample9.q.out b/ql/src/test/results/clientpositive/spark/sample9.q.out index ce05393..326c3de 100644 --- a/ql/src/test/results/clientpositive/spark/sample9.q.out +++ b/ql/src/test/results/clientpositive/spark/sample9.q.out @@ -44,106 +44,23 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Spark -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: a - Statistics: Num rows: 1000 Data size: 10603 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Filter Operator - isSamplingPred: true - predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 500 Data size: 5301 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1 - columns.types int:string - escape.delim \ - hive.serialization.extend.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: srcbucket0.txt - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count 2 - bucket_field_name key - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.srcbucket - numFiles 2 - numRows 1000 - rawDataSize 10603 - serialization.ddl struct srcbucket { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 11603 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat 
- properties: - COLUMN_STATS_ACCURATE true - bucket_count 2 - bucket_field_name key - columns key,value - columns.comments - columns.types int:string -#### A masked pattern was here #### - name default.srcbucket - numFiles 2 - numRows 1000 - rawDataSize 10603 - serialization.ddl struct srcbucket { i32 key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 11603 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.srcbucket - name: default.srcbucket - Truncated Path -> Alias: - /srcbucket/srcbucket0.txt [a] - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: a + GatherStats: false + Filter Operator + isSamplingPred: true + predicate: (((hash(key) & 2147483647) % 2) = 0) (type: boolean) + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + ListSink PREHOOK: query: SELECT s.* FROM (SELECT a.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) a) s diff --git a/ql/src/test/results/clientpositive/spark/script_pipe.q.out b/ql/src/test/results/clientpositive/spark/script_pipe.q.out index 271acaf..fb98eec 100644 --- a/ql/src/test/results/clientpositive/spark/script_pipe.q.out +++ b/ql/src/test/results/clientpositive/spark/script_pipe.q.out @@ -19,41 +19,32 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE Transform Operator command: true output info: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -84,21 +75,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), key (type: string), value (type: string), key (type: string), value (type: string), key (type: string), value (type: string), key (type: string), value (type: string), key (type: string), value (type: string) outputColumnNames: 
_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Transform Operator command: head -n 1 output info: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/sort.q.out b/ql/src/test/results/clientpositive/spark/sort.q.out index 888f759..b60c6b8 100644 --- a/ql/src/test/results/clientpositive/spark/sort.q.out +++ b/ql/src/test/results/clientpositive/spark/sort.q.out @@ -19,25 +19,20 @@ STAGE PLANS: Map Operator Tree: TableScan alias: x - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string) Reducer 2 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out index a1ac731..6f8066d 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out @@ -75,7 +75,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -97,43 +96,33 @@ STAGE PLANS: TableScan Reduce Output Operator sort order: - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: string), value (type: string) Map 6 Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key > '2') (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Map 9 Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: 
COMPLETE Column stats: NONE Filter Operator predicate: ((key > '2') and key is null) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reducer 10 Reduce Operator Tree: @@ -141,22 +130,17 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col0 = 0) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator keys: _col0 (type: bigint) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reducer 11 Reduce Operator Tree: Join Operator @@ -166,12 +150,10 @@ STAGE PLANS: 0 {VALUE._col0} {VALUE._col1} 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string) Reducer 7 Reduce Operator Tree: @@ -182,28 +164,22 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} outputColumnNames: _col0, _col1, _col5 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col5 is null (type: boolean) - Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string) Reducer 8 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -252,29 +228,23 @@ STAGE PLANS: key expressions: key (type: string), value (type: string) sort order: ++ Map-reduce partition columns: key (type: string), value (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: 
NONE Filter Operator predicate: (((key > '9') and key is not null) and value is not null) (type: boolean) - Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Reducer 5 Reduce Operator Tree: Join Operator @@ -284,14 +254,11 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -537,7 +504,6 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false table: @@ -559,43 +525,33 @@ STAGE PLANS: TableScan Reduce Output Operator sort order: - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: string), value (type: string) Map 6 Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key > '2') (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: _col0 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Map 9 Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((key > '2') and key is null) (type: boolean) - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reducer 10 Reduce Operator Tree: @@ -603,22 +559,17 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col0 = 0) (type: 
boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator keys: _col0 (type: bigint) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reducer 11 Reduce Operator Tree: Join Operator @@ -628,12 +579,10 @@ STAGE PLANS: 0 {VALUE._col0} {VALUE._col1} 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string) Reducer 7 Reduce Operator Tree: @@ -644,28 +593,22 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} outputColumnNames: _col0, _col1, _col5 - Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: _col5 is null (type: boolean) - Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: string) Reducer 8 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 302 Data size: 3208 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -714,29 +657,23 @@ STAGE PLANS: key expressions: key (type: string), value (type: string) sort order: ++ Map-reduce partition columns: key (type: string), value (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (((key > '9') and key is not null) and value is not null) (type: boolean) - Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: string), _col1 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Reducer 5 Reduce Operator Tree: Join 
Operator @@ -746,14 +683,11 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/temp_table.q.out b/ql/src/test/results/clientpositive/spark/temp_table.q.out index c025738..b3decb5 100644 --- a/ql/src/test/results/clientpositive/spark/temp_table.q.out +++ b/ql/src/test/results/clientpositive/spark/temp_table.q.out @@ -18,17 +18,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((key % 2) = 0) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -89,17 +85,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((key % 2) = 1) (type: boolean) - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -170,14 +162,11 @@ STAGE PLANS: Processor Tree: TableScan alias: foo - Statistics: Num rows: 14 Data size: 2856 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 14 Data size: 2856 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 2040 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: select * from foo limit 10 @@ -247,13 +236,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 28 Data size: 5812 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 2070 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 2070 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/transform1.q.out b/ql/src/test/results/clientpositive/spark/transform1.q.out index a657d01..356509e 100644 --- a/ql/src/test/results/clientpositive/spark/transform1.q.out +++ b/ql/src/test/results/clientpositive/spark/transform1.q.out @@ -25,21 +25,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: transform1_t1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: a (type: string), b (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Transform Operator command: cat output info: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -98,21 +94,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: transform1_t2 - Statistics: Num rows: -1 Data size: 6 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: '012' (type: string) outputColumnNames: _col0 - Statistics: Num rows: -1 Data size: 6 Basic stats: PARTIAL Column stats: COMPLETE Transform Operator command: cat output info: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Statistics: Num rows: -1 Data size: 6 Basic stats: PARTIAL Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: -1 Data size: 6 Basic stats: PARTIAL Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out b/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out index 8a0f714..352ab13 100644 --- a/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out +++ b/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out @@ -106,12 +106,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator expressions: ds (type: string), key (type: string), value (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Transform Operator command: cat output info: @@ -124,16 +122,13 @@ STAGE PLANS: serialization.format 9 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: ((_col1 < 100) and (_col0 = '2008-04-08')) (type: boolean) - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col1 (type: string) sort order: + Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column 
stats: NONE tag: -1 value expressions: '2008-04-08' (type: string), _col1 (type: string), _col2 (type: string) auto parallelism: true @@ -335,13 +330,11 @@ STAGE PLANS: Select Operator expressions: VALUE._col1 (type: string), VALUE._col2 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git a/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out b/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out index 87aad21..bb93e2f 100644 --- a/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out +++ b/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out @@ -108,12 +108,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE GatherStats: false Select Operator expressions: ds (type: string), key (type: string), value (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Transform Operator command: cat output info: @@ -126,16 +124,13 @@ STAGE PLANS: serialization.format 9 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: (_col1 < 100) (type: boolean) - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col1 (type: string) sort order: + Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE tag: -1 value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) auto parallelism: true @@ -243,13 +238,11 @@ STAGE PLANS: Select Operator expressions: VALUE._col1 (type: string), VALUE._col2 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 333 Data size: 3537 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git a/ql/src/test/results/clientpositive/spark/union10.q.out b/ql/src/test/results/clientpositive/spark/union10.q.out index 8e2c750..81c8830 100644 --- a/ql/src/test/results/clientpositive/spark/union10.q.out +++ b/ql/src/test/results/clientpositive/spark/union10.q.out @@ -46,49 +46,37 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: 
_col0 (type: bigint) Map 4 Map Operator Tree: TableScan alias: s2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Map 6 Map Operator Tree: TableScan alias: s3 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 2 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union11.q.out b/ql/src/test/results/clientpositive/spark/union11.q.out index 95d0699..f9947f5 100644 --- a/ql/src/test/results/clientpositive/spark/union11.q.out +++ b/ql/src/test/results/clientpositive/spark/union11.q.out @@ -37,49 +37,37 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Map 5 Map Operator Tree: TableScan alias: s2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Map 7 Map Operator Tree: TableScan alias: s3 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -110,14 +98,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE 
Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union14.q.out b/ql/src/test/results/clientpositive/spark/union14.q.out index 0872072..fa95fe3 100644 --- a/ql/src/test/results/clientpositive/spark/union14.q.out +++ b/ql/src/test/results/clientpositive/spark/union14.q.out @@ -53,17 +53,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 3 Reduce Operator Tree: @@ -72,14 +68,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union15.q.out b/ql/src/test/results/clientpositive/spark/union15.q.out index 7c3d76c..85f1be2 100644 --- a/ql/src/test/results/clientpositive/spark/union15.q.out +++ b/ql/src/test/results/clientpositive/spark/union15.q.out @@ -35,17 +35,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Map 5 Map Operator Tree: @@ -116,14 +112,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 25 Data size: 2500 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 25 Data size: 2500 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 25 Data size: 2500 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union16.q.out b/ql/src/test/results/clientpositive/spark/union16.q.out index 0de8c4f..c2449e2 100644 --- 
a/ql/src/test/results/clientpositive/spark/union16.q.out +++ b/ql/src/test/results/clientpositive/spark/union16.q.out @@ -407,14 +407,11 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union18.q.out b/ql/src/test/results/clientpositive/spark/union18.q.out index 26716e3..07ea2c5 100644 --- a/ql/src/test/results/clientpositive/spark/union18.q.out +++ b/ql/src/test/results/clientpositive/spark/union18.q.out @@ -54,17 +54,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Map 6 Map Operator Tree: @@ -107,10 +103,8 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 501 Data size: 136272 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 501 Data size: 136272 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -156,10 +150,8 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 501 Data size: 228456 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 501 Data size: 228456 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union19.q.out b/ql/src/test/results/clientpositive/spark/union19.q.out index ac3856c..2fefe8e 100644 --- a/ql/src/test/results/clientpositive/spark/union19.q.out +++ b/ql/src/test/results/clientpositive/spark/union19.q.out @@ -54,17 +54,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Map 6 Map Operator Tree: @@ -109,18 +105,15 @@ STAGE 
PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 501 Data size: 5584 Basic stats: COMPLETE Column stats: PARTIAL Group By Operator aggregations: count(_col1) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 501 Data size: 48096 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 501 Data size: 48096 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) Reducer 7 Reduce Operator Tree: @@ -129,14 +122,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 25000 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 25000 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 25000 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -182,10 +172,8 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 501 Data size: 228456 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 501 Data size: 228456 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union2.q.out b/ql/src/test/results/clientpositive/spark/union2.q.out index 990bbf5..74edd9e 100644 --- a/ql/src/test/results/clientpositive/spark/union2.q.out +++ b/ql/src/test/results/clientpositive/spark/union2.q.out @@ -56,14 +56,11 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union23.q.out b/ql/src/test/results/clientpositive/spark/union23.q.out index fd15966..1656393 100644 --- a/ql/src/test/results/clientpositive/spark/union23.q.out +++ b/ql/src/test/results/clientpositive/spark/union23.q.out @@ -65,10 +65,8 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff 
--git a/ql/src/test/results/clientpositive/spark/union25.q.out b/ql/src/test/results/clientpositive/spark/union25.q.out index fc07c29..7905889 100644 --- a/ql/src/test/results/clientpositive/spark/union25.q.out +++ b/ql/src/test/results/clientpositive/spark/union25.q.out @@ -159,14 +159,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 279 Data size: 5562 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: bigint), _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 279 Data size: 5562 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 279 Data size: 5562 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union28.q.out b/ql/src/test/results/clientpositive/spark/union28.q.out index 32999fa..48a071b 100644 --- a/ql/src/test/results/clientpositive/spark/union28.q.out +++ b/ql/src/test/results/clientpositive/spark/union28.q.out @@ -68,43 +68,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string), value (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Map 5 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string), value (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Reducer 4 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union3.q.out b/ql/src/test/results/clientpositive/spark/union3.q.out index 9878c64..259faa2 100644 --- a/ql/src/test/results/clientpositive/spark/union3.q.out +++ b/ql/src/test/results/clientpositive/spark/union3.q.out @@ -59,70 +59,50 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 1 
- Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Map 4 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Map 6 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Map 9 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Reducer 10 Reduce Operator Tree: Select Operator - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: 2 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Reducer 11 Reduce Operator Tree: Select Operator @@ -174,19 +154,15 @@ STAGE PLANS: Reducer 7 Reduce Operator Tree: Select Operator - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Limit Number of rows: 1 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Select Operator expressions: 1 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Reducer 8 Reduce Operator Tree: Select Operator diff --git a/ql/src/test/results/clientpositive/spark/union30.q.out b/ql/src/test/results/clientpositive/spark/union30.q.out index f871f67..d18a98a 100644 --- a/ql/src/test/results/clientpositive/spark/union30.q.out +++ b/ql/src/test/results/clientpositive/spark/union30.q.out @@ -65,43 +65,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator 
aggregations: count(1) keys: key (type: string), value (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Map 4 Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string) outputColumnNames: key, value - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string), value (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col2 (type: bigint) Map 6 Map Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union33.q.out b/ql/src/test/results/clientpositive/spark/union33.q.out index d77d676..e6d0364 100644 --- a/ql/src/test/results/clientpositive/spark/union33.q.out +++ b/ql/src/test/results/clientpositive/spark/union33.q.out @@ -70,22 +70,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Reducer 4 Reduce Operator Tree: @@ -94,12 +90,10 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: partials outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Reducer 5 Reduce Operator Tree: @@ -211,22 +205,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: rand() (type: double) - 
Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Map 5 Map Operator Tree: @@ -254,12 +244,10 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: partials outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Reducer 3 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union4.q.out b/ql/src/test/results/clientpositive/spark/union4.q.out index dfaabbd..8ab70bf 100644 --- a/ql/src/test/results/clientpositive/spark/union4.q.out +++ b/ql/src/test/results/clientpositive/spark/union4.q.out @@ -43,33 +43,25 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Map 4 Map Operator Tree: TableScan alias: s2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 2 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union5.q.out b/ql/src/test/results/clientpositive/spark/union5.q.out index bb94820..dd78c89 100644 --- a/ql/src/test/results/clientpositive/spark/union5.q.out +++ b/ql/src/test/results/clientpositive/spark/union5.q.out @@ -32,33 +32,25 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Map 5 Map Operator Tree: TableScan alias: s2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -89,14 +81,11 @@ STAGE 
PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union6.q.out b/ql/src/test/results/clientpositive/spark/union6.q.out index 68ad6fe..4a56842 100644 --- a/ql/src/test/results/clientpositive/spark/union6.q.out +++ b/ql/src/test/results/clientpositive/spark/union6.q.out @@ -42,17 +42,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Map 4 Map Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union7.q.out b/ql/src/test/results/clientpositive/spark/union7.q.out index 7497362..212dd7d 100644 --- a/ql/src/test/results/clientpositive/spark/union7.q.out +++ b/ql/src/test/results/clientpositive/spark/union7.q.out @@ -31,17 +31,13 @@ STAGE PLANS: Map Operator Tree: TableScan alias: s1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Map 5 Map Operator Tree: @@ -92,14 +88,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union9.q.out b/ql/src/test/results/clientpositive/spark/union9.q.out index d3797ce..06b09f4 100644 --- a/ql/src/test/results/clientpositive/spark/union9.q.out +++ b/ql/src/test/results/clientpositive/spark/union9.q.out @@ -71,14 +71,11 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 
(type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union_ppr.q.out b/ql/src/test/results/clientpositive/spark/union_ppr.q.out index 9477dcd..a1b8fbd 100644 --- a/ql/src/test/results/clientpositive/spark/union_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/union_ppr.q.out @@ -356,13 +356,11 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: string), KEY.reducesinkkey3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 666 Data size: 7074 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 666 Data size: 7074 Basic stats: COMPLETE Column stats: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out index 8fe85fb..ae17bc4 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out @@ -78,43 +78,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out index d3a1f46..bac0e1a 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out @@ -98,22 +98,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL 
Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out index 6ec6e91..fb19959 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out @@ -76,52 +76,43 @@ STAGE PLANS: Spark Edges: Reducer 2 <- Map 1 (GROUP) - Reducer 4 <- Union 3 (GROUP SORT) - Reducer 6 <- Map 5 (GROUP) - Union 3 <- Reducer 2 (NONE), Reducer 6 (NONE) + Reducer 5 <- Map 4 (GROUP) + Union 3 <- Reducer 2 (NONE), Reducer 5 (NONE) #### A masked pattern was here #### Vertices: Map 1 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) - Map 5 + Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -136,24 +127,14 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) - Reducer 4 - Reduce Operator Tree: - Extract - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - table: - input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat - output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat - serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe 
- name: default.outputtbl1 - Reducer 6 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Reducer 5 Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) @@ -166,11 +147,13 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 Union 3 Vertex: Union 3 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out index 881291c..db579d6 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out @@ -79,52 +79,43 @@ STAGE PLANS: Spark Edges: Reducer 2 <- Map 1 (GROUP) - Reducer 4 <- Union 3 (GROUP SORT) - Reducer 6 <- Map 5 (GROUP) - Union 3 <- Reducer 2 (NONE), Reducer 6 (NONE) + Reducer 5 <- Map 4 (GROUP) + Union 3 <- Reducer 2 (NONE), Reducer 5 (NONE) #### A masked pattern was here #### Vertices: Map 1 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) - Map 5 + Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -139,24 +130,14 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: 
string) - Reducer 4 - Reduce Operator Tree: - Extract - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - table: - input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat - output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat - serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 - Reducer 6 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Reducer 5 Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) @@ -169,11 +150,13 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 Union 3 Vertex: Union 3 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out index 9af6da8..bee8aa9 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out @@ -69,8 +69,7 @@ STAGE PLANS: Stage: Stage-1 Spark Edges: - Reducer 3 <- Union 2 (GROUP SORT) - Union 2 <- Map 1 (NONE), Map 4 (NONE) + Union 2 <- Map 1 (NONE), Map 3 (NONE) #### A masked pattern was here #### Vertices: Map 1 @@ -83,12 +82,14 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) - Map 4 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Map 3 Map Operator Tree: TableScan alias: inputtbl1 @@ -98,23 +99,13 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) - Reducer 3 - Reduce Operator Tree: - Extract - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE - table: - input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat - output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat - serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe - name: default.outputtbl1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 Union 2 Vertex: Union 2 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out index 54267be..f73cd43 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out @@ -74,52 +74,43 @@ STAGE PLANS: Spark Edges: Reducer 2 <- Map 1 (GROUP) - Reducer 4 <- Union 3 (GROUP SORT) - Reducer 6 <- Map 5 (GROUP) - Union 3 <- Reducer 2 (NONE), Reducer 6 (NONE) + Reducer 5 <- Map 4 (GROUP) + Union 3 <- Reducer 2 (NONE), Reducer 5 (NONE) #### A masked pattern was here #### Vertices: Map 1 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string), ds (type: string) outputColumnNames: key, ds - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string), ds (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col2 (type: bigint) - Map 5 + Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string), ds (type: string) outputColumnNames: key, ds - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string), ds (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col2 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -134,24 +125,14 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) - Reducer 4 - Reduce Operator Tree: - Extract - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: 
default.outputtbl1 - Reducer 6 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + Reducer 5 Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) @@ -164,11 +145,13 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 Union 3 Vertex: Union 3 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out index acc76f3..fe35c3c 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out @@ -82,43 +82,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -290,49 +282,39 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 30 Data size: 30 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 7) (type: boolean) - Statistics: Num rows: 15 Data size: 15 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: '7' (type: string) outputColumnNames: key - Statistics: Num rows: 15 Data size: 15 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 15 Data size: 15 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key 
expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 15 Data size: 15 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 30 Data size: 30 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (key = 7) (type: boolean) - Statistics: Num rows: 15 Data size: 15 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: '7' (type: string) outputColumnNames: key - Statistics: Num rows: 15 Data size: 15 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 15 Data size: 15 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 15 Data size: 15 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -466,43 +448,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out index a55d00f..b375d78 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out @@ -100,22 +100,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - 
Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 5 Map Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out index e240837..635c4ac 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out @@ -80,43 +80,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out index 7265bc7..a6a634c 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out @@ -80,43 +80,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + 
Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out index c46971c..d3925ec 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out @@ -76,43 +76,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out index 619abfa..e9f9230 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out @@ -94,43 +94,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output 
Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -301,42 +293,33 @@ STAGE PLANS: Spark Edges: Reducer 2 <- Map 1 (GROUP) - Reducer 4 <- Union 3 (GROUP SORT) - Reducer 6 <- Map 5 (GROUP) - Union 3 <- Reducer 2 (NONE), Reducer 6 (NONE) + Reducer 5 <- Map 4 (GROUP) + Union 3 <- Reducer 2 (NONE), Reducer 5 (NONE) #### A masked pattern was here #### Vertices: Map 1 Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 500 - Statistics: Num rows: 500 Data size: 5000 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 500 Data size: 5000 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Map 5 + Map 4 Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 500 - Statistics: Num rows: 500 Data size: 5000 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 500 Data size: 5000 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) Reducer 2 Reduce Operator Tree: @@ -348,24 +331,14 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) - Reducer 4 - Reduce Operator Tree: - Extract - Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl2 - Reducer 6 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl2 + Reducer 5 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string) @@ -375,11 +348,13 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition 
columns: _col2 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string) + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl2 Union 3 Vertex: Union 3 @@ -456,10 +431,10 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE false - numFiles 1 + numFiles 2 numRows -1 rawDataSize -1 - totalSize 6826 + totalSize 6814 #### A masked pattern was here #### # Storage Information @@ -498,42 +473,33 @@ STAGE PLANS: Spark Edges: Reducer 2 <- Map 1 (GROUP) - Reducer 4 <- Union 3 (GROUP SORT) - Reducer 6 <- Map 5 (GROUP) - Union 3 <- Reducer 2 (NONE), Reducer 6 (NONE) + Reducer 5 <- Map 4 (GROUP) + Union 3 <- Reducer 2 (NONE), Reducer 5 (NONE) #### A masked pattern was here #### Vertices: Map 1 Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 1000 - Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) - Map 5 + Map 4 Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 1000 - Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1000 Data size: 10000 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) Reducer 2 Reduce Operator Tree: @@ -545,24 +511,14 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), _col2 (type: string), _col3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Reduce Output Operator - key expressions: _col2 (type: string), _col3 (type: string) - sort order: ++ - Map-reduce partition columns: _col2 (type: string), _col3 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string) - Reducer 4 - Reduce Operator Tree: - Extract - Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 2000 Data size: 20000 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.outputtbl3 - Reducer 6 + File Output Operator + compressed: false + table: + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl3 + Reducer 5 Reduce Operator Tree: Select Operator expressions: VALUE._col0 (type: string), VALUE._col1 (type: string), VALUE._col2 (type: string), VALUE._col3 (type: string) @@ -572,11 +528,13 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), _col2 (type: string), _col3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Reduce Output Operator - key expressions: _col2 (type: string), _col3 (type: string) - sort order: ++ - Map-reduce partition columns: _col2 (type: string), _col3 (type: string) - value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string) + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl3 Union 3 Vertex: Union 3 @@ -659,7 +617,7 @@ Protect Mode: None #### A masked pattern was here #### Partition Parameters: COLUMN_STATS_ACCURATE false - numFiles 1 + numFiles 2 numRows -1 rawDataSize -1 totalSize 6812 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out index 281fead..354a386 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out @@ -83,43 +83,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out index 07a4a95..5732543 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out @@ -107,22 +107,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - 
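
The union_remove_*.q.out hunks above show each UNION ALL branch writing its files straight into the target table (Reducer 2 and Reducer 5 now end in a File Output Operator), which is consistent with numFiles rising from 1 to 2 in the partition parameters. A hedged sketch of the session settings and query shape this test family exercises; the inputtbl1/outputtbl1 schemas are assumed, not taken from this patch:

-- settings that exercise the union-remove path (names real, values illustrative):
set hive.optimize.union.remove=true;
set hive.mapred.supports.subdirectories=true;
set hive.merge.mapfiles=false;
set hive.merge.mapredfiles=false;

insert overwrite table outputtbl1
select * from (
  select key, count(1) as `values` from inputtbl1 group by key
  union all
  select key, count(1) as `values` from inputtbl1 group by key
) a;
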
Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 5 Map Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_6.q.out b/ql/src/test/results/clientpositive/spark/union_remove_6.q.out index 7edbcf4..147f1fe 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_6.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_6.q.out @@ -83,43 +83,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 6 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 4 Reduce Operator Tree: @@ -166,10 +158,8 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -209,10 +199,8 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out 
b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out index 7f4b1c3..ea8170b 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out @@ -82,43 +82,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 4 Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Reducer 2 Reduce Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out index 3f7dbb9..af768da 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out @@ -104,22 +104,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 5 Map Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out index 93541db..730a9c2 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out @@ -114,22 +114,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: inputtbl1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count(1) keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 
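
The dropped "Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL" annotations are derived from metastore parameters (numRows, rawDataSize, totalSize); a zero row count next to a nonzero size is the signature of a table that was loaded but never analyzed. For reference, the statements that populate those parameters, using the table name from the tests above:

analyze table inputtbl1 compute statistics;
-- column-level statistics are gathered separately:
analyze table inputtbl1 compute statistics for columns key;
-- both show up under Table Parameters:
describe formatted inputtbl1;
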
Data size: 30 Basic stats: PARTIAL Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE value expressions: _col1 (type: bigint) Map 5 Map Operator Tree: diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out index a030571..2b3ed0d 100644 --- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out @@ -128,22 +128,18 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over1korc - Statistics: Num rows: 5887 Data size: 23548 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: i (type: int) outputColumnNames: i - Statistics: Num rows: 5887 Data size: 23548 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: avg(50), avg(UDFToDouble(50)), avg(CAST( 50 AS decimal(10,0))) keys: i (type: int) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5887 Data size: 23548 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 5887 Data size: 23548 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: struct), _col2 (type: struct), _col3 (type: struct) Execution mode: vectorized Reducer 2 @@ -153,28 +149,22 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2943 Data size: 11772 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4)) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2943 Data size: 11772 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + - Statistics: Num rows: 2943 Data size: 11772 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: decimal(14,4)) Reducer 3 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: decimal(14,4)) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2943 Data size: 11772 Basic stats: COMPLETE Column stats: NONE Limit Number of rows: 10 - Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/vectorization_9.q.out b/ql/src/test/results/clientpositive/spark/vectorization_9.q.out index f3cbfba..0f5c203 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_9.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_9.q.out @@ -53,25 +53,20 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((cstring2 like '%b%') and ((cdouble >= -1.389) or (cstring1 < 'a'))) (type: boolean) - 
Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp) outputColumnNames: cstring1, cdouble, ctimestamp1 - Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble) keys: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp) - Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE value expressions: _col3 (type: bigint), _col4 (type: struct), _col5 (type: double) Execution mode: vectorized Reducer 2 @@ -81,14 +76,11 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639) (type: double), (- (_col1 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * _col3) (type: double), _col5 (type: double), (9763215.5639 / _col1) (type: double), (_col3 / -1.389) (type: double), _col4 (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 - Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out index 776ce50..979a777 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out @@ -22,29 +22,23 @@ STAGE PLANS: Map Operator Tree: TableScan alias: t2 - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: cint is not null (type: boolean) - Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: cint (type: int) sort order: + Map-reduce partition columns: cint (type: int) - Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE Execution mode: vectorized Map 4 Map Operator Tree: TableScan alias: t1 - Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: cint is not null (type: boolean) - Statistics: Num rows: 6144 Data size: 188618 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: cint (type: int) sort order: + Map-reduce partition columns: cint (type: int) - Statistics: Num rows: 6144 Data size: 
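
vector_cast_constant, vectorization_9, and vectorized_shufflejoin all carry "Execution mode: vectorized" on their map vertices. As a reminder of what drives that mode (a sketch, since the test setup itself is not part of these hunks), vectorized execution needs the switch below plus an ORC-backed input such as alltypesorc:

set hive.vectorized.execution.enabled=true;

-- simplified from the vectorization_9 plan shape above:
explain
select cstring1, cdouble, count(cdouble), stddev_samp(cdouble), min(cdouble)
from alltypesorc
where cstring2 like '%b%'
group by cstring1, cdouble, ctimestamp1;
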
188618 Basic stats: COMPLETE Column stats: NONE Execution mode: vectorized Reducer 2 Reduce Operator Tree: @@ -55,19 +49,15 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} outputColumnNames: _col2, _col17 - Statistics: Num rows: 6758 Data size: 207479 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: int), _col17 (type: int) outputColumnNames: _col2, _col17 - Statistics: Num rows: 6758 Data size: 207479 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(_col2), max(_col17), min(_col2), avg((_col2 + _col17)) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint), _col1 (type: int), _col2 (type: int), _col3 (type: struct) Reducer 3 Reduce Operator Tree: @@ -75,14 +65,11 @@ STAGE PLANS: aggregations: count(VALUE._col0), max(VALUE._col1), min(VALUE._col2), avg(VALUE._col3) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint), _col1 (type: int), _col2 (type: int), _col3 (type: double) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/stats10.q.out b/ql/src/test/results/clientpositive/stats10.q.out index f5bdba1..804e1b8 100644 --- a/ql/src/test/results/clientpositive/stats10.q.out +++ b/ql/src/test/results/clientpositive/stats10.q.out @@ -373,7 +373,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: bucket3_1 - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Stage: Stage-1 Stats-Aggr Operator diff --git a/ql/src/test/results/clientpositive/stats11.q.out b/ql/src/test/results/clientpositive/stats11.q.out index 11762bd..c7f1abe 100644 --- a/ql/src/test/results/clientpositive/stats11.q.out +++ b/ql/src/test/results/clientpositive/stats11.q.out @@ -535,8 +535,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -556,8 +554,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.srcbucket_mapjoin numFiles 2 - numRows 0 - rawDataSize 0 serialization.ddl struct srcbucket_mapjoin { i32 key, string value} serialization.format 1 serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/stats12.q.out b/ql/src/test/results/clientpositive/stats12.q.out index 2588380..e888518 100644 --- a/ql/src/test/results/clientpositive/stats12.q.out +++ b/ql/src/test/results/clientpositive/stats12.q.out @@ -63,7 +63,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: analyze_srcpart - Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE + 
Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: NONE Statistics Aggregation Key Prefix: default.analyze_srcpart/ GatherStats: true Path -> Alias: diff --git a/ql/src/test/results/clientpositive/stats13.q.out b/ql/src/test/results/clientpositive/stats13.q.out index f72e310..94f9541 100644 --- a/ql/src/test/results/clientpositive/stats13.q.out +++ b/ql/src/test/results/clientpositive/stats13.q.out @@ -64,7 +64,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: analyze_srcpart - Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: NONE Statistics Aggregation Key Prefix: default.analyze_srcpart/ GatherStats: true Path -> Alias: diff --git a/ql/src/test/results/clientpositive/stats2.q.out b/ql/src/test/results/clientpositive/stats2.q.out index 63c0ad5..694c1a2 100644 --- a/ql/src/test/results/clientpositive/stats2.q.out +++ b/ql/src/test/results/clientpositive/stats2.q.out @@ -27,23 +27,14 @@ STAGE PLANS: expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string), _col3 (type: string) - sort order: ++ - Map-reduce partition columns: _col2 (type: string), _col3 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) - Reduce Operator Tree: - Extract - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.analyze_t1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.analyze_t1 Stage: Stage-0 Move Operator @@ -136,7 +127,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: analyze_t1 - Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: NONE Stage: Stage-1 Stats-Aggr Operator diff --git a/ql/src/test/results/clientpositive/stats4.q.out b/ql/src/test/results/clientpositive/stats4.q.out index 7327b54..39d5413 100644 --- a/ql/src/test/results/clientpositive/stats4.q.out +++ b/ql/src/test/results/clientpositive/stats4.q.out @@ -48,11 +48,20 @@ insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, v POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage - Stage-0 depends on stages: Stage-2 + Stage-8 depends on stages: Stage-2 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-0 depends on stages: Stage-5, Stage-4, Stage-7 Stage-3 depends on stages: Stage-0 - Stage-4 depends on stages: Stage-2 - Stage-1 depends on stages: Stage-4 - Stage-5 depends on stages: Stage-1 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + Stage-14 depends on stages: Stage-2 , 
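
Across the stats*.q.out files the TableScan annotation flips from "Column stats: COMPLETE" to "Column stats: NONE". That flag only reports whether column-level statistics exist in the metastore for the scanned columns; they are collected independently of the basic stats, e.g. (partition spec illustrative):

analyze table analyze_srcpart partition (ds='2008-04-08', hr='11') compute statistics for columns key, value;
describe formatted analyze_srcpart partition (ds='2008-04-08', hr='11');
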
consists of Stage-11, Stage-10, Stage-12 + Stage-11 + Stage-1 depends on stages: Stage-11, Stage-10, Stage-13 + Stage-9 depends on stages: Stage-1 + Stage-10 + Stage-12 + Stage-13 depends on stages: Stage-12 STAGE PLANS: Stage: Stage-2 @@ -68,12 +77,14 @@ STAGE PLANS: expressions: key (type: string), value (type: string), ds (type: string), hr (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: string), _col3 (type: string) - sort order: ++ - Map-reduce partition columns: _col2 (type: string), _col3 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part1 Filter Operator predicate: (ds > '2008-04-08') (type: boolean) Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE @@ -83,21 +94,21 @@ STAGE PLANS: Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false + Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Reduce Operator Tree: - Extract - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part1 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part2 + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### Stage: Stage-0 Move Operator @@ -119,23 +130,40 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - Reduce Output Operator - key expressions: _col2 (type: string) - sort order: + - Map-reduce partition columns: _col2 (type: string) - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Reduce Operator Tree: - Extract - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.nzhang_part2 + File Output Operator + compressed: false + table: + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part1 + + Stage: Stage-6 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part1 + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-14 + Conditional Operator + + Stage: Stage-11 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### Stage: Stage-1 Move Operator @@ -150,9 +178,39 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.nzhang_part2 - Stage: Stage-5 + Stage: Stage-9 Stats-Aggr Operator + Stage: Stage-10 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part2 + + Stage: Stage-12 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.nzhang_part2 + + Stage: Stage-13 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + PREHOOK: query: from srcpart insert overwrite table nzhang_part1 partition (ds, hr) select key, value, ds, hr where ds <= '2008-04-08' insert overwrite table nzhang_part2 partition(ds='2008-12-31', hr) select key, value, hr where ds > '2008-04-08' diff --git a/ql/src/test/results/clientpositive/stats7.q.out b/ql/src/test/results/clientpositive/stats7.q.out index 097546b..1162103 100644 --- a/ql/src/test/results/clientpositive/stats7.q.out +++ b/ql/src/test/results/clientpositive/stats7.q.out @@ -47,7 +47,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: analyze_srcpart - Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: NONE Stage: Stage-1 Stats-Aggr Operator diff --git a/ql/src/test/results/clientpositive/stats8.q.out b/ql/src/test/results/clientpositive/stats8.q.out index ae5040b..11b2010 100644 --- a/ql/src/test/results/clientpositive/stats8.q.out +++ b/ql/src/test/results/clientpositive/stats8.q.out @@ -47,7 +47,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: analyze_srcpart - Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: NONE Stage: Stage-1 Stats-Aggr Operator @@ -157,7 +157,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: analyze_srcpart - Statistics: Num rows: 500 Data size: 5312 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 5312 Basic stats: PARTIAL Column stats: NONE Stage: Stage-1 Stats-Aggr Operator @@ -230,7 +230,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: analyze_srcpart - Statistics: Num rows: 1000 Data size: 10624 Basic stats: PARTIAL 
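
In stats2.q.out and stats4.q.out the ReduceSink/Extract pair keyed on the partition columns disappears: the map now writes straight to the FileSink, and a Conditional Operator with merge/move stages is appended. That shuffle-before-write shape is the sorted dynamic-partition insert, so the new plans look like that optimization being switched off; hedged, since the test configuration is not shown in these hunks:

set hive.optimize.sort.dynamic.partition=false;
set hive.exec.dynamic.partition=true;
set hive.exec.dynamic.partition.mode=nonstrict;

-- query taken from stats4.q.out above:
insert overwrite table nzhang_part1 partition (ds, hr)
select key, value, ds, hr from srcpart where ds <= '2008-04-08';
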
Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: PARTIAL Column stats: NONE Stage: Stage-1 Stats-Aggr Operator @@ -303,7 +303,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: analyze_srcpart - Statistics: Num rows: 1500 Data size: 15936 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1500 Data size: 15936 Basic stats: PARTIAL Column stats: NONE Stage: Stage-1 Stats-Aggr Operator @@ -376,7 +376,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: analyze_srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Stage: Stage-1 Stats-Aggr Operator diff --git a/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out b/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out index 27a3742..982baab 100644 --- a/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out +++ b/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out @@ -20,8 +20,13 @@ POSTHOOK: query: explain insert overwrite table tmptable partition (part) select POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 Stage-2 depends on stages: Stage-0 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 STAGE PLANS: Stage: Stage-1 @@ -37,23 +42,23 @@ STAGE PLANS: expressions: 'no_such_value' (type: string), value (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string) - sort order: + - Map-reduce partition columns: _col1 (type: string) + File Output Operator + compressed: false Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - Reduce Operator Tree: - Extract - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmptable + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmptable + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### Stage: Stage-0 Move Operator @@ -70,6 +75,36 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmptable + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmptable + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + PREHOOK: query: insert overwrite table tmptable partition (part) select key, value from src where key = 'no_such_value' PREHOOK: type: QUERY PREHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/stats_only_null.q.out b/ql/src/test/results/clientpositive/stats_only_null.q.out index c4728c9..4549b79 100644 --- a/ql/src/test/results/clientpositive/stats_only_null.q.out +++ b/ql/src/test/results/clientpositive/stats_only_null.q.out @@ -322,16 +322,20 @@ STAGE PLANS: PREHOOK: query: select count(*), count(a), count(b), count(c), count(d) from stats_null PREHOOK: type: QUERY +PREHOOK: Input: default@stats_null #### A masked pattern was here #### POSTHOOK: query: select count(*), count(a), count(b), count(c), count(d) from stats_null POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_null #### A masked pattern was here #### 10 8 8 10 10 PREHOOK: query: select count(*), count(a), count(b), count(c), count(d) from stats_null_part PREHOOK: type: QUERY +PREHOOK: Input: default@stats_null_part #### A masked pattern was here #### POSTHOOK: query: select count(*), count(a), count(b), count(c), count(d) from stats_null_part POSTHOOK: type: QUERY +POSTHOOK: Input: default@stats_null_part #### A masked pattern was here #### 10 8 8 10 10 PREHOOK: query: drop table stats_null_part diff --git a/ql/src/test/results/clientpositive/subquery_alias.q.out b/ql/src/test/results/clientpositive/subquery_alias.q.out index cda4c66..c0d80ea 100644 --- a/ql/src/test/results/clientpositive/subquery_alias.q.out +++ b/ql/src/test/results/clientpositive/subquery_alias.q.out @@ -97,33 +97,21 @@ SELECT * FROM ) as src2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: s - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: s + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: SELECT * FROM ( SELECT * FROM diff --git a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out index 399c4f8..ebd47ca 100644 --- a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out +++ b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out @@ -99,7 +99,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic 
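
subquery_alias.q.out loses its Map Reduce stage altogether: Stage-0 becomes the root, with the TableScan/Select/ListSink inlined under the Fetch Operator's Processor Tree. That is the fetch-task conversion for simple selects; roughly (setting value illustrative):

set hive.fetch.task.conversion=more;

explain
select * from (select * from src s) src2;
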
stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator @@ -566,7 +566,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator diff --git a/ql/src/test/results/clientpositive/subquery_notin.q.out b/ql/src/test/results/clientpositive/subquery_notin.q.out index d0e50b9..c5cfe15 100644 --- a/ql/src/test/results/clientpositive/subquery_notin.q.out +++ b/ql/src/test/results/clientpositive/subquery_notin.q.out @@ -150,7 +150,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator @@ -537,7 +537,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator @@ -823,7 +823,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator @@ -1145,7 +1145,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator @@ -1489,7 +1489,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.out b/ql/src/test/results/clientpositive/subquery_notin_having.q.out index 91581de..87da349 100644 --- a/ql/src/test/results/clientpositive/subquery_notin_having.q.out +++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.out @@ -99,7 +99,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator @@ -476,7 +476,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic 
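
The recurring change from "expressions: _col0 (type: bigint)" to "expressions: 0 (type: bigint)" sits directly under a "predicate: (_col0 = 0)" filter, so the projection can fold the column to the constant the filter has already pinned down. This count-equals-zero branch is the null-safety check Hive plans for NOT IN subqueries; a query of roughly this shape produces it:

explain
select * from src
where src.key not in (select s1.key from src s1 where s1.key > '9');
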
stats: NONE Column stats: NONE Group By Operator @@ -728,7 +728,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator diff --git a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out index fe99821..273037a 100644 --- a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out +++ b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out @@ -928,7 +928,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator diff --git a/ql/src/test/results/clientpositive/subquery_views.q.out b/ql/src/test/results/clientpositive/subquery_views.q.out index 472fa10..50a5e29 100644 --- a/ql/src/test/results/clientpositive/subquery_views.q.out +++ b/ql/src/test/results/clientpositive/subquery_views.q.out @@ -120,7 +120,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator @@ -287,7 +287,7 @@ STAGE PLANS: predicate: (_col0 = 0) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: bigint) + expressions: 0 (type: bigint) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator diff --git a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out index b021b70..404a9c2 100644 --- a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out +++ b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out @@ -171,8 +171,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.uservisits_web_text_none numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite} serialization.format | serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -192,8 +190,6 @@ STAGE PLANS: #### A masked pattern was here #### name default.uservisits_web_text_none numFiles 1 - numRows 0 - rawDataSize 0 serialization.ddl struct uservisits_web_text_none { string sourceip, string desturl, string visitdate, float adrevenue, string useragent, string ccode, string lcode, string skeyword, i32 avgtimeonsite} serialization.format | serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe diff --git a/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out index eae45b2..f8486ad 100644 --- a/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out +++ 
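
In tez/alter_merge_stats_orc.q.out the analyzed table is now also recorded as a read entity (the added "PREHOOK: Input"/"POSTHOOK: Input" lines), even for the noscan variant, which derives numFiles/totalSize from file metadata without reading rows. The analyze statement appears verbatim in the test; the merge step is assumed from the test's name and is not shown in these hunks:

analyze table src_orc_merge_test_stat compute statistics noscan;
-- assumed merge step (ORC fast file merge):
alter table src_orc_merge_test_part_stat partition (ds='2011') concatenate;
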
b/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out @@ -65,9 +65,11 @@ value string #### A masked pattern was here #### PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@src_orc_merge_test_stat PREHOOK: Output: default@src_orc_merge_test_stat POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_orc_merge_test_stat POSTHOOK: Output: default@src_orc_merge_test_stat PREHOOK: query: desc formatted src_orc_merge_test_stat PREHOOK: type: DESCTABLE @@ -115,9 +117,11 @@ POSTHOOK: Input: default@src_orc_merge_test_stat POSTHOOK: Output: default@src_orc_merge_test_stat PREHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@src_orc_merge_test_stat PREHOOK: Output: default@src_orc_merge_test_stat POSTHOOK: query: analyze table src_orc_merge_test_stat compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_orc_merge_test_stat POSTHOOK: Output: default@src_orc_merge_test_stat PREHOOK: query: desc formatted src_orc_merge_test_stat PREHOOK: type: DESCTABLE @@ -243,8 +247,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 3 - numRows 500 - rawDataSize 47000 + numRows 1500 + rawDataSize 141000 totalSize 7488 #### A masked pattern was here #### @@ -260,10 +264,12 @@ Storage Desc Params: serialization.format 1 PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@src_orc_merge_test_part_stat PREHOOK: Output: default@src_orc_merge_test_part_stat PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011 POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_orc_merge_test_part_stat POSTHOOK: Output: default@src_orc_merge_test_part_stat POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011 PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011') @@ -317,10 +323,12 @@ POSTHOOK: Input: default@src_orc_merge_test_part_stat POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011 PREHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@src_orc_merge_test_part_stat PREHOOK: Output: default@src_orc_merge_test_part_stat PREHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011 POSTHOOK: query: analyze table src_orc_merge_test_part_stat partition(ds='2011') compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_orc_merge_test_part_stat POSTHOOK: Output: default@src_orc_merge_test_part_stat POSTHOOK: Output: default@src_orc_merge_test_part_stat@ds=2011 PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011') diff --git a/ql/src/test/results/clientpositive/tez/auto_join0.q.out b/ql/src/test/results/clientpositive/tez/auto_join0.q.out index f10cc51..58201ad 100644 --- a/ql/src/test/results/clientpositive/tez/auto_join0.q.out +++ b/ql/src/test/results/clientpositive/tez/auto_join0.q.out @@ -56,6 +56,8 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col2, _col3 + input vertices: + 1 Map 4 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), 
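
tez/auto_join0 and auto_join1 gain an "input vertices: 1 Map N" entry under the Map Join Operator, spelling out which vertex feeds the broadcast (hash-table) side of the Tez map join. Settings of this kind drive the automatic conversion (size threshold illustrative):

set hive.execution.engine=tez;
set hive.auto.convert.join=true;
set hive.auto.convert.join.noconditionaltask=true;
set hive.auto.convert.join.noconditionaltask.size=10000000;

explain
select count(*) from src a join src b on a.key = b.key;
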
_col2 (type: string), _col3 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/auto_join1.q.out b/ql/src/test/results/clientpositive/tez/auto_join1.q.out index 3ba216d..85708d7 100644 --- a/ql/src/test/results/clientpositive/tez/auto_join1.q.out +++ b/ql/src/test/results/clientpositive/tez/auto_join1.q.out @@ -59,6 +59,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0, _col6 + input vertices: + 1 Map 1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_1.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_1.q.out new file mode 100644 index 0000000..2b4a68d --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_1.q.out @@ -0,0 +1,1049 @@ +PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket + +CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_small +POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket + +CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_small +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_big +POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_big +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here 
#### +POSTHOOK: Output: default@bucket_big +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: -- Since 
size is being used to find the big table, the order of the tables in the join does not matter +explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter +explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_small + a + TOK_TABREF + TOK_TABNAME + bucket_big + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 3 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 0 Map 3 + Position of Big Table: 1 + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + 
serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [b] + /bucket_big/ds=2008-04-09 [b] + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 0 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + 
bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [a] + Reducer 2 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +38 +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . 
+ TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: 
COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [a] + /bucket_big/ds=2008-04-09 [a] + Reducer 3 + Needs Tagging: false + Reduce 
Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +38 +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . 
+ TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: 
COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [a] + /bucket_big/ds=2008-04-09 [a] + Reducer 3 + Needs Tagging: false + Reduce 
Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +38 diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_10.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_10.q.out new file mode 100644 index 0000000..d661eef --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_10.q.out @@ -0,0 +1,369 @@ +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl1 +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl1 +PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl2 +POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl2 +PREHOOK: query: insert overwrite table tbl1 +select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl1 +POSTHOOK: query: insert overwrite table tbl1 +select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl1 +POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] +PREHOOK: query: insert overwrite table tbl2 +select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl2 +POSTHOOK: query: insert overwrite table tbl2 +select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl2 +POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- One of the subqueries contains a union, so it should not be converted to a sort-merge join. +explain +select count(*) from + ( + select * from + (select a.key as key, a.value as value from tbl1 a where key < 6 + union all + select a.key as key, a.value as value from tbl1 a where key < 6 + ) usubq1 ) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- One of the subqueries contains a union, so it should not be converted to a sort-merge join. +explain +select count(*) from + ( + select * from + (select a.key as key, a.value as value from tbl1 a where key < 6 + union all + select a.key as key, a.value as value from tbl1 a where key < 6 + ) usubq1 ) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 5 (BROADCAST_EDGE), Union 2 (CONTAINS) + Map 4 <- Map 5 (BROADCAST_EDGE), Union 2 (CONTAINS) + Reducer 3 <- Union 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 5 + Select Operator + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Map 4 + Map Operator Tree: + TableScan + alias: a + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 5 + Select Operator + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Map 5 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + 
key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Union 2 + Vertex: Union 2 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + ( + select * from + (select a.key as key, a.value as value from tbl1 a where key < 6 + union all + select a.key as key, a.value as value from tbl1 a where key < 6 + ) usubq1 ) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + ( + select * from + (select a.key as key, a.value as value from tbl1 a where key < 6 + union all + select a.key as key, a.value as value from tbl1 a where key < 6 + ) usubq1 ) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +40 +PREHOOK: query: -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join. +explain +select count(*) from + (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join. 
+explain +select count(*) from + (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 3 <- Reducer 2 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 4 <- Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + bucketGroup: true + keys: key (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + input vertices: + 0 Reducer 2 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic 
stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +8 diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_11.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_11.q.out new file mode 100644 index 0000000..6658313 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_11.q.out @@ -0,0 +1,1505 @@ +PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket + +CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_small +POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket + +CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_small +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_big +POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: 
database:default +POSTHOOK: Output: default@bucket_big +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local inpath 
'../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_small + a + TOK_TABREF + TOK_TABNAME + bucket_big + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 3 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 0 Map 3 + Position of Big Table: 1 + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types 
string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [b] + /bucket_big/ds=2008-04-09 [b] + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 0 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe 
+ + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [a] + Reducer 2 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +38 +PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter +-- The tables are only bucketed and not sorted, the join should not be converted +-- Currently, a join is only converted to a sort-merge join without a hint, automatic conversion to +-- bucketized mapjoin is not done +explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter +-- The tables are only bucketed and not sorted, the join should not be converted +-- Currently, a join is only converted to a sort-merge join without a hint, automatic conversion to +-- bucketized mapjoin is not done +explain extended select count(*) FROM bucket_small 
a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_small + a + TOK_TABREF + TOK_TABNAME + bucket_big + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 3 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 0 Map 3 + Position of Big Table: 1 + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [b] + /bucket_big/ds=2008-04-09 [b] + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 0 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [a] + Reducer 2 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +38 +PREHOOK: query: -- The join is converted to a bucketed mapjoin with a mapjoin hint +explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The join is converted to a bucketed mapjoin with a mapjoin hint +explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_small + a + TOK_TABREF + TOK_TABNAME + bucket_big + b + = + . + TOK_TABLE_OR_COL + a + key + . 
+ TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_HINTLIST + TOK_HINT + TOK_MAPJOIN + TOK_HINTARGLIST + a + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 3 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 0 Map 3 + Position of Big Table: 1 + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + 
columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [b] + /bucket_big/ds=2008-04-09 [b] + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 0 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [a] + Reducer 2 + Needs Tagging: false + Reduce Operator Tree: + 
Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +38 +PREHOOK: query: -- HIVE-7023 +explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key +PREHOOK: type: QUERY +POSTHOOK: query: -- HIVE-7023 +explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_small + a + TOK_TABREF + TOK_TABNAME + bucket_big + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_TABREF + TOK_TABNAME + bucket_big + c + = + . + TOK_TABLE_OR_COL + a + key + . 
+ TOK_TABLE_OR_COL + c + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_HINTLIST + TOK_HINT + TOK_MAPJOIN + TOK_HINTARGLIST + a + b + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 + 1 + 2 + Estimated key counts: Map 4 => 1, Map 3 => 58 + keys: + 0 key (type: string) + 1 key (type: string) + 2 key (type: string) + input vertices: + 0 Map 4 + 2 Map 3 + Position of Big Table: 1 + Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + 
COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [b] + /bucket_big/ds=2008-04-09 [b] + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + tag: 2 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked 
pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [c] + /bucket_big/ds=2008-04-09 [c] + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 0 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + 
serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [a] + Reducer 2 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +180 diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_12.q.out new file mode 100644 index 0000000..ce98312 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_12.q.out @@ -0,0 +1,639 @@ +PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket + +CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_small +POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket + +CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_small +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: 
default@bucket_small +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_big +POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_big +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big 
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: CREATE TABLE bucket_medium (key string, value string) partitioned by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_medium +POSTHOOK: query: CREATE TABLE bucket_medium (key string, value string) partitioned by (ds string) +CLUSTERED BY (key) SORTED BY (key) INTO 3 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_medium +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_medium +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_medium +POSTHOOK: Output: default@bucket_medium@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_medium@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_medium@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: 
default@bucket_medium@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_medium partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_medium@ds=2008-04-08 +Warning: Map Join MAPJOIN[30][bigTable=?] in task 'Map 3' is a cross product +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_JOIN + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_small + a + TOK_TABREF + TOK_TABNAME + bucket_medium + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_TABREF + TOK_TABNAME + bucket_big + c + = + . + TOK_TABLE_OR_COL + c + key + . + TOK_TABLE_OR_COL + b + key + TOK_TABREF + TOK_TABNAME + bucket_medium + d + = + . + TOK_TABLE_OR_COL + c + key + . + TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 3 <- Map 1 (BROADCAST_EDGE), Map 2 (BROADCAST_EDGE), Map 5 (BROADCAST_EDGE) + Reducer 4 <- Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: d + Statistics: Num rows: 0 Data size: 170 Basic stats: PARTIAL Column stats: NONE + GatherStats: false + Reduce Output Operator + sort order: + Statistics: Num rows: 0 Data size: 170 Basic stats: PARTIAL Column stats: NONE + tag: 1 + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 3 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_medium + numFiles 3 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_medium { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 170 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 3 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_medium + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_medium { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_medium + name: default.bucket_medium + Truncated Path -> Alias: + /bucket_medium/ds=2008-04-08 [d] + Map 2 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 1 Data size: 170 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 170 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 170 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 3 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_medium + numFiles 3 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_medium { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 170 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 3 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_medium + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_medium { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_medium + name: default.bucket_medium + Truncated Path -> Alias: + /bucket_medium/ds=2008-04-08 [b] + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 1 to 2 + condition expressions: + 0 + 1 + 2 + Estimated key counts: Map 5 => 1, Map 2 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + 2 key (type: string) + input vertices: + 0 Map 5 + 1 Map 2 + Position of Big Table: 2 + Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 + 1 + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 139 Data size: 14064 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 139 Data size: 14064 Basic stats: COMPLETE Column stats: NONE + Group By Operator + 
aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> 
Alias: + /bucket_big/ds=2008-04-08 [c] + /bucket_big/ds=2008-04-09 [c] + Map 5 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 0 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [a] + Reducer 4 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + 
+Warning: Map Join MAPJOIN[30][bigTable=?] in task 'Map 3' is a cross product +PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_medium +PREHOOK: Input: default@bucket_medium@ds=2008-04-08 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_medium b ON a.key = b.key JOIN bucket_big c ON c.key = b.key JOIN bucket_medium d ON c.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_medium +POSTHOOK: Input: default@bucket_medium@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +570 diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_13.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_13.q.out new file mode 100644 index 0000000..ccdc9d0 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_13.q.out @@ -0,0 +1,692 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl1 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl1 +PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl2 +POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl2 +PREHOOK: query: insert overwrite table tbl1 select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl1 +POSTHOOK: query: insert overwrite table tbl1 select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl1 +POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table tbl2 select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl2 +POSTHOOK: query: insert overwrite table tbl2 select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl2 +POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: CREATE TABLE dest1(k1 int, k2 int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: 
default@dest1 +POSTHOOK: query: CREATE TABLE dest1(k1 int, k2 int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dest1 +PREHOOK: query: CREATE TABLE dest2(k1 string, k2 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@dest2 +POSTHOOK: query: CREATE TABLE dest2(k1 string, k2 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dest2 +PREHOOK: query: -- A SMB join followed by a mutli-insert +explain +from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join followed by a mutli-insert +explain +from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-3 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-3 + Stage-4 depends on stages: Stage-0 + Stage-1 depends on stages: Stage-3 + Stage-5 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {key} {value} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col2 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + Select Operator + expressions: _col1 (type: string), _col3 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest2 + Map 2 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is 
not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + + Stage: Stage-3 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-4 + Stats-Aggr Operator + + Stage: Stage-1 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest2 + + Stage: Stage-5 + Stats-Aggr Operator + +PREHOOK: query: from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest2 +POSTHOOK: query: from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest2 +POSTHOOK: Lineage: dest1.k1 SIMPLE [(tbl1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dest1.k2 SIMPLE [(tbl2)b.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dest2.k1 SIMPLE [(tbl1)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: dest2.k2 SIMPLE [(tbl2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: query: select * from dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +#### A masked pattern was here #### +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +2 2 +4 4 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +8 8 +9 9 +PREHOOK: query: select * from dest2 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest2 +#### A masked pattern was here #### +POSTHOOK: query: select * from dest2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest2 +#### A masked pattern was here #### +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_2 val_2 +val_4 val_4 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_8 val_8 +val_9 val_9 +PREHOOK: query: -- A SMB join followed by a mutli-insert +explain +from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join followed by a mutli-insert +explain +from ( + SELECT a.key key1, a.value value1, 
b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-3 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-3 + Stage-4 depends on stages: Stage-0 + Stage-1 depends on stages: Stage-3 + Stage-5 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {key} {value} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col2 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + Select Operator + expressions: _col1 (type: string), _col3 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest2 + Map 2 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + + Stage: Stage-3 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-4 + Stats-Aggr Operator + + Stage: Stage-1 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest2 + + Stage: Stage-5 + Stats-Aggr Operator + +PREHOOK: query: from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest2 +POSTHOOK: query: from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest2 +POSTHOOK: Lineage: dest1.k1 SIMPLE [(tbl1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dest1.k2 SIMPLE [(tbl2)b.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dest2.k1 SIMPLE [(tbl1)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: dest2.k2 SIMPLE [(tbl2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: query: select * from dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +#### A masked pattern was here #### +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +2 2 +4 4 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +8 8 +9 9 +PREHOOK: query: select * from dest2 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest2 +#### A masked pattern was here #### +POSTHOOK: query: select * from dest2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest2 +#### A masked pattern was here #### +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_2 val_2 +val_4 val_4 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_8 val_8 +val_9 val_9 +PREHOOK: query: -- A SMB join followed by a mutli-insert +explain +from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join followed by a mutli-insert +explain +from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-3 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-3 + Stage-4 depends on stages: Stage-0 + Stage-1 depends on stages: Stage-3 + Stage-5 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-2 + Tez + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + 
condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {key} {value} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 2 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col2 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + Select Operator + expressions: _col1 (type: string), _col3 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest2 + Map 2 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + + Stage: Stage-3 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest1 + + Stage: Stage-4 + Stats-Aggr Operator + + Stage: Stage-1 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.dest2 + + Stage: Stage-5 + Stats-Aggr Operator + +PREHOOK: query: from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +PREHOOK: Output: default@dest1 +PREHOOK: Output: default@dest2 +POSTHOOK: query: from ( + SELECT a.key key1, a.value value1, b.key key2, b.value value2 + FROM tbl1 a JOIN tbl2 b + ON a.key = b.key ) subq +INSERT OVERWRITE TABLE dest1 select key1, key2 +INSERT OVERWRITE TABLE dest2 select value1, value2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 
+POSTHOOK: Output: default@dest1 +POSTHOOK: Output: default@dest2 +POSTHOOK: Lineage: dest1.k1 SIMPLE [(tbl1)a.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dest1.k2 SIMPLE [(tbl2)b.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dest2.k1 SIMPLE [(tbl1)a.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: dest2.k2 SIMPLE [(tbl2)b.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select * from dest1 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest1 +#### A masked pattern was here #### +POSTHOOK: query: select * from dest1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest1 +#### A masked pattern was here #### +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +0 0 +2 2 +4 4 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +5 5 +8 8 +9 9 +PREHOOK: query: select * from dest2 +PREHOOK: type: QUERY +PREHOOK: Input: default@dest2 +#### A masked pattern was here #### +POSTHOOK: query: select * from dest2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dest2 +#### A masked pattern was here #### +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_0 val_0 +val_2 val_2 +val_4 val_4 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_5 val_5 +val_8 val_8 +val_9 val_9 diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_14.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_14.q.out new file mode 100644 index 0000000..c594c85 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_14.q.out @@ -0,0 +1,234 @@ +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl1 +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl1 +PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl2 +POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl2 +PREHOOK: query: insert overwrite table tbl1 select * from src where key < 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl1 +POSTHOOK: query: insert overwrite table tbl1 select * from src where key < 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl1 +POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table tbl2 select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl2 +POSTHOOK: query: insert overwrite table tbl2 select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl2 +POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] 
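
The two plans that follow show how the big table is chosen for outer map joins on Tez: the preserved (outer) side must be streamed, so only the other side can be broadcast. With tbl1 (key < 20) bigger than tbl2 (key < 10), the LEFT OUTER JOIN broadcasts b into a's map task (Map 2 <- Map 1); after tbl2 is reloaded with key < 200, the RIGHT OUTER JOIN broadcasts a instead (Map 1 <- Map 3). As a minimal sketch, the .q files that drive these golden outputs typically enable the conversion with settings along these lines (the exact values are not visible in this diff, so treat them as assumptions):

  set hive.auto.convert.join=true;
  set hive.auto.convert.sortmerge.join=true;
  set hive.optimize.bucketmapjoin=true;
  set hive.optimize.bucketmapjoin.sortedmerge=true;
  -- threshold that decides which side is small enough to broadcast:
  set hive.auto.convert.join.noconditionaltask.size=10000000;
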
+PREHOOK: query: -- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed +explain +select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed +explain +select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + input vertices: + 1 Map 1 + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +32 +PREHOOK: query: insert overwrite table tbl2 select * from src where key < 200 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl2 +POSTHOOK: query: insert overwrite table tbl2 select * from src where key < 200 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl2 +POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- 
Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed +explain +select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed +explain +select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 189 Data size: 1891 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + input vertices: + 0 Map 3 + Statistics: Num rows: 207 Data size: 2080 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 207 Data size: 2080 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +207 diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_15.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_15.q.out new file mode 100644 index 0000000..255b185 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_15.q.out @@ -0,0 +1,198 @@ +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl1 +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl1 +PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl2 +POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl2 +PREHOOK: query: insert overwrite table tbl1 select * from src where key < 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl1 +POSTHOOK: query: insert overwrite table tbl1 select * from src where key < 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl1 +POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table tbl2 select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl2 +POSTHOOK: query: insert overwrite table tbl2 select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl2 +POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: explain +select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + input vertices: + 1 Map 1 + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: 
_col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain +select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Right Outer Join0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + input vertices: + 0 Map 3 + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 22 Data size: 176 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 20 Data size: 160 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_16.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_16.q.out new file mode 100644 index 0000000..b32ac06 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_16.q.out @@ -0,0 +1,252 @@ +PREHOOK: query: CREATE TABLE stage_bucket_big +( +key BIGINT, +value STRING +) +PARTITIONED BY (file_tag STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stage_bucket_big +POSTHOOK: query: CREATE TABLE stage_bucket_big +( +key BIGINT, +value STRING +) +PARTITIONED BY (file_tag STRING) 
+POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stage_bucket_big +PREHOOK: query: CREATE TABLE bucket_big +( +key BIGINT, +value STRING +) +PARTITIONED BY (day STRING, pri bigint) +clustered by (key) sorted by (key) into 12 buckets +stored as RCFile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_big +POSTHOOK: query: CREATE TABLE bucket_big +( +key BIGINT, +value STRING +) +PARTITIONED BY (day STRING, pri bigint) +clustered by (key) sorted by (key) into 12 buckets +stored as RCFile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_big +PREHOOK: query: CREATE TABLE stage_bucket_small +( +key BIGINT, +value string +) +PARTITIONED BY (file_tag STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@stage_bucket_small +POSTHOOK: query: CREATE TABLE stage_bucket_small +( +key BIGINT, +value string +) +PARTITIONED BY (file_tag STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@stage_bucket_small +PREHOOK: query: CREATE TABLE bucket_small +( +key BIGINT, +value string +) +PARTITIONED BY (pri bigint) +clustered by (key) sorted by (key) into 12 buckets +stored as RCFile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_small +POSTHOOK: query: CREATE TABLE bucket_small +( +key BIGINT, +value string +) +PARTITIONED BY (pri bigint) +clustered by (key) sorted by (key) into 12 buckets +stored as RCFile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_small +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='1') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@stage_bucket_small +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='1') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@stage_bucket_small +POSTHOOK: Output: default@stage_bucket_small@file_tag=1 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='2') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@stage_bucket_small +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='2') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@stage_bucket_small +POSTHOOK: Output: default@stage_bucket_small@file_tag=2 +PREHOOK: query: insert overwrite table bucket_small partition(pri) +select +key, +value, +file_tag as pri +from +stage_bucket_small +where file_tag between 1 and 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@stage_bucket_small +PREHOOK: Input: default@stage_bucket_small@file_tag=1 +PREHOOK: Input: default@stage_bucket_small@file_tag=2 +PREHOOK: Output: default@bucket_small +POSTHOOK: query: insert overwrite table bucket_small partition(pri) +select +key, +value, +file_tag as pri +from +stage_bucket_small +where file_tag between 1 and 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stage_bucket_small +POSTHOOK: Input: default@stage_bucket_small@file_tag=1 +POSTHOOK: Input: default@stage_bucket_small@file_tag=2 
+POSTHOOK: Output: default@bucket_small@pri=1 +POSTHOOK: Output: default@bucket_small@pri=2 +POSTHOOK: Lineage: bucket_small PARTITION(pri=1).key SIMPLE [(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint, comment:null), ] +POSTHOOK: Lineage: bucket_small PARTITION(pri=1).value SIMPLE [(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: bucket_small PARTITION(pri=2).key SIMPLE [(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint, comment:null), ] +POSTHOOK: Lineage: bucket_small PARTITION(pri=2).value SIMPLE [(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' overwrite into table stage_bucket_big partition (file_tag='1') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@stage_bucket_big +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' overwrite into table stage_bucket_big partition (file_tag='1') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@stage_bucket_big +POSTHOOK: Output: default@stage_bucket_big@file_tag=1 +PREHOOK: query: insert overwrite table bucket_big partition(day,pri) +select +key, +value, +'day1' as day, +1 as pri +from +stage_bucket_big +where +file_tag='1' +PREHOOK: type: QUERY +PREHOOK: Input: default@stage_bucket_big +PREHOOK: Input: default@stage_bucket_big@file_tag=1 +PREHOOK: Output: default@bucket_big +POSTHOOK: query: insert overwrite table bucket_big partition(day,pri) +select +key, +value, +'day1' as day, +1 as pri +from +stage_bucket_big +where +file_tag='1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@stage_bucket_big +POSTHOOK: Input: default@stage_bucket_big@file_tag=1 +POSTHOOK: Output: default@bucket_big@day=day1/pri=1 +POSTHOOK: Lineage: bucket_big PARTITION(day=day1,pri=1).key SIMPLE [(stage_bucket_big)stage_bucket_big.FieldSchema(name:key, type:bigint, comment:null), ] +POSTHOOK: Lineage: bucket_big PARTITION(day=day1,pri=1).value SIMPLE [(stage_bucket_big)stage_bucket_big.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: select +a.key , +a.value , +b.value , +'day1' as day, +1 as pri +from +( +select +key, +value +from bucket_big where day='day1' +) a +left outer join +( +select +key, +value +from bucket_small +where pri between 1 and 2 +) b +on +(a.key = b.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@day=day1/pri=1 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@pri=1 +PREHOOK: Input: default@bucket_small@pri=2 +#### A masked pattern was here #### +POSTHOOK: query: select +a.key , +a.value , +b.value , +'day1' as day, +1 as pri +from +( +select +key, +value +from bucket_big where day='day1' +) a +left outer join +( +select +key, +value +from bucket_small +where pri between 1 and 2 +) b +on +(a.key = b.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@day=day1/pri=1 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@pri=1 +POSTHOOK: Input: default@bucket_small@pri=2 +#### A masked pattern was here #### +0 val_0 val_0 day1 1 +0 val_0 val_0 day1 1 +0 val_0 val_0 day1 1 +0 val_0 val_0 day1 1 +0 val_0 val_0 day1 1 +0 val_0 val_0 day1 1 +103 val_103 val_103 day1 1 +103 val_103 val_103 day1 1 +103 val_103 val_103 day1 1 +103 val_103 
val_103 day1 1 +374 val_374 val_374 day1 1 +374 val_374 val_374 day1 1 +172 val_172 val_172 day1 1 +172 val_172 val_172 day1 1 +172 val_172 val_172 day1 1 +172 val_172 val_172 day1 1 +169 val_169 val_169 day1 1 +169 val_169 val_169 day1 1 +169 val_169 val_169 day1 1 +169 val_169 val_169 day1 1 +169 val_169 val_169 day1 1 +169 val_169 val_169 day1 1 +169 val_169 val_169 day1 1 +169 val_169 val_169 day1 1 diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_2.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_2.q.out new file mode 100644 index 0000000..ebcffcb --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_2.q.out @@ -0,0 +1,717 @@ +PREHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket +CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_small +POSTHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket +CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_small +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE 
+PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_big +POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_big +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: -- Since the leftmost table is assumed as the big table, arrange the tables in the join accordingly +explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Since the leftmost table is assumed as the big table, arrange the tables in the join accordingly +explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . 
+ TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 54 Data size: 5500 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: 
COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [a] + /bucket_big/ds=2008-04-09 [a] + Reducer 3 + Needs Tagging: false + Reduce 
Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +#### A masked pattern was here #### +38 +PREHOOK: query: -- The mapjoin should fail resulting in the sort-merge join +explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The mapjoin should fail resulting in the sort-merge join +explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . 
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_FUNCTIONSTAR
+               count
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+          Map Operator Tree:
+              TableScan
+                alias: b
+                Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+                GatherStats: false
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: key is not null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: string)
+                    sort order: +
+                    Map-reduce partition columns: key (type: string)
+                    Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE
+                    tag: 1
+                    auto parallelism: true
+          Path -> Alias:
+#### A masked pattern was here ####
+          Path -> Partition:
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-08
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-08
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 4
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_small
+                numFiles 4
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_small { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 226
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 4
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_small
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_small { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_small
+              name: default.bucket_small
+          Truncated Path -> Alias:
+            /bucket_small/ds=2008-04-08 [b]
+        Map 2 
+          Map Operator Tree:
+              TableScan
+                alias: a
+                Statistics: Num rows: 54 Data size: 5500 Basic stats: COMPLETE Column stats: NONE
+                GatherStats: false
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: key is not null (type: boolean)
+                  Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    condition expressions:
+                      0 
+                      1 
+                    Estimated key counts: Map 1 => 1
+                    keys:
+                      0 key (type: string)
+                      1 key (type: string)
+                    input vertices:
+                      1 Map 1
+                    Position of Big Table: 0
+                    Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          tag: -1
+                          value expressions: _col0 (type: bigint)
+                          auto parallelism: false
+          Path -> Alias:
+#### A masked pattern was here ####
+          Path -> Partition:
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-08
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-08
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_big
+                numFiles 2
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_big { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 2750
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 2
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_big
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_big { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_big
+              name: default.bucket_big
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-09
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-09
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_big
+                numFiles 2
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_big { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 2750
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 2
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_big
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_big { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_big
+              name: default.bucket_big
+          Truncated Path -> Alias:
+            /bucket_big/ds=2008-04-08 [a]
+            /bucket_big/ds=2008-04-09 [a]
+        Reducer 3 
+          Needs Tagging: false
+          Reduce Operator Tree:
+            Group By Operator
+              aggregations: count(VALUE._col0)
+              mode: mergepartial
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: bigint)
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        columns _col0
+                        columns.types bigint
+                        escape.delim \
+                        hive.serialization.extend.nesting.levels true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_big@ds=2008-04-09
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_big@ds=2008-04-09
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+#### A masked pattern was here ####
+38
diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_3.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_3.q.out
new file mode 100644
index 0000000..af5f1ce
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_3.q.out
@@ -0,0 +1,1029 @@
+PREHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+   TOK_FROM
+      TOK_JOIN
+         TOK_TABREF
+            TOK_TABNAME
+               bucket_small
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               bucket_big
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_FUNCTIONSTAR
+               count
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+          Map Operator Tree:
+              TableScan
+                alias: b
+                Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                GatherStats: false
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: key is not null (type: boolean)
+                  Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    condition expressions:
+                      0 
+                      1 
+                    Estimated key counts: Map 3 => 1
+                    keys:
+                      0 key (type: string)
+                      1 key (type: string)
+                    input vertices:
+                      0 Map 3
+                    Position of Big Table: 1
+                    Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          tag: -1
+                          value expressions: _col0 (type: bigint)
+                          auto parallelism: false
+          Path -> Alias:
+#### A masked pattern was here ####
+          Path -> Partition:
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-08
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-08
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 4
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_big
+                numFiles 4
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_big { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 4
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_big
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_big { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_big
+              name: default.bucket_big
+          Truncated Path -> Alias:
+            /bucket_big/ds=2008-04-08 [b]
+        Map 3 
+          Map Operator Tree:
+              TableScan
+                alias: a
+                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
+                GatherStats: false
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: key is not null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: string)
+                    sort order: +
+                    Map-reduce partition columns: key (type: string)
+                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    tag: 0
+                    auto parallelism: true
+          Path -> Alias:
+#### A masked pattern was here ####
+          Path -> Partition:
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-08
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-08
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_small
+                numFiles 2
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_small { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 114
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 2
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_small
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_small { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_small
+              name: default.bucket_small
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-09
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-09
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_small
+                numFiles 2
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_small { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 114
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 2
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_small
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_small { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_small
+              name: default.bucket_small
+          Truncated Path -> Alias:
+            /bucket_small/ds=2008-04-08 [a]
+            /bucket_small/ds=2008-04-09 [a]
+        Reducer 2 
+          Needs Tagging: false
+          Reduce Operator Tree:
+            Group By Operator
+              aggregations: count(VALUE._col0)
+              mode: mergepartial
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: bigint)
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        columns _col0
+                        columns.types bigint
+                        escape.delim \
+                        hive.serialization.extend.nesting.levels true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+38
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+   TOK_FROM
+      TOK_JOIN
+         TOK_TABREF
+            TOK_TABNAME
+               bucket_big
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               bucket_small
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_FUNCTIONSTAR
+               count
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+          Map Operator Tree:
+              TableScan
+                alias: b
+                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
+                GatherStats: false
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: key is not null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: string)
+                    sort order: +
+                    Map-reduce partition columns: key (type: string)
+                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    tag: 1
+                    auto parallelism: true
+          Path -> Alias:
+#### A masked pattern was here ####
+          Path -> Partition:
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-08
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-08
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_small
+                numFiles 2
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_small { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 114
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 2
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_small
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_small { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_small
+              name: default.bucket_small
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-09
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-09
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_small
+                numFiles 2
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_small { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 114
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 2
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_small
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_small { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_small
+              name: default.bucket_small
+          Truncated Path -> Alias:
+            /bucket_small/ds=2008-04-08 [b]
+            /bucket_small/ds=2008-04-09 [b]
+        Map 2 
+          Map Operator Tree:
+              TableScan
+                alias: a
+                Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                GatherStats: false
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: key is not null (type: boolean)
+                  Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    condition expressions:
+                      0 
+                      1 
+                    Estimated key counts: Map 1 => 1
+                    keys:
+                      0 key (type: string)
+                      1 key (type: string)
+                    input vertices:
+                      1 Map 1
+                    Position of Big Table: 0
+                    Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          tag: -1
+                          value expressions: _col0 (type: bigint)
+                          auto parallelism: false
+          Path -> Alias:
+#### A masked pattern was here ####
+          Path -> Partition:
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-08
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-08
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 4
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_big
+                numFiles 4
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_big { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 4
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_big
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_big { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_big
+              name: default.bucket_big
+          Truncated Path -> Alias:
+            /bucket_big/ds=2008-04-08 [a]
+        Reducer 3 
+          Needs Tagging: false
+          Reduce Operator Tree:
+            Group By Operator
+              aggregations: count(VALUE._col0)
+              mode: mergepartial
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: bigint)
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        columns _col0
+                        columns.types bigint
+                        escape.delim \
+                        hive.serialization.extend.nesting.levels true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+38
+PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+   TOK_FROM
+      TOK_JOIN
+         TOK_TABREF
+            TOK_TABNAME
+               bucket_big
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               bucket_small
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_FUNCTIONSTAR
+               count
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 2 <- Map 1 (BROADCAST_EDGE)
+        Reducer 3 <- Map 2 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+          Map Operator Tree:
+              TableScan
+                alias: b
+                Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE
+                GatherStats: false
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: key is not null (type: boolean)
+                  Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: string)
+                    sort order: +
+                    Map-reduce partition columns: key (type: string)
+                    Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE
+                    tag: 1
+                    auto parallelism: true
+          Path -> Alias:
+#### A masked pattern was here ####
+          Path -> Partition:
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-08
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-08
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_small
+                numFiles 2
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_small { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 114
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 2
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_small
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_small { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_small
+              name: default.bucket_small
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-09
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-09
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_small
+                numFiles 2
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_small { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 114
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 2
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_small
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_small { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_small
+              name: default.bucket_small
+          Truncated Path -> Alias:
+            /bucket_small/ds=2008-04-08 [b]
+            /bucket_small/ds=2008-04-09 [b]
+        Map 2 
+          Map Operator Tree:
+              TableScan
+                alias: a
+                Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
+                GatherStats: false
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: key is not null (type: boolean)
+                  Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    condition expressions:
+                      0 
+                      1 
+                    Estimated key counts: Map 1 => 1
+                    keys:
+                      0 key (type: string)
+                      1 key (type: string)
+                    input vertices:
+                      1 Map 1
+                    Position of Big Table: 0
+                    Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          tag: -1
+                          value expressions: _col0 (type: bigint)
+                          auto parallelism: false
+          Path -> Alias:
+#### A masked pattern was here ####
+          Path -> Partition:
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-08
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-08
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 4
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_big
+                numFiles 4
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_big { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 5812
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 4
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_big
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_big { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_big
+              name: default.bucket_big
+          Truncated Path -> Alias:
+            /bucket_big/ds=2008-04-08 [a]
+        Reducer 3 
+          Needs Tagging: false
+          Reduce Operator Tree:
+            Group By Operator
+              aggregations: count(VALUE._col0)
+              mode: mergepartial
+              outputColumnNames: _col0
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: bigint)
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        columns _col0
+                        columns.types bigint
+                        escape.delim \
+                        hive.serialization.extend.nesting.levels true
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  TotalFiles: 1
+                  GatherStats: false
+                  MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@ds=2008-04-08
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@ds=2008-04-08
+PREHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@ds=2008-04-08
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+38
diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_4.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_4.q.out
new file mode 100644
index 0000000..d12da62
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_4.q.out
@@ -0,0 +1,1045 @@
+PREHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter
+explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+   TOK_FROM
+      TOK_JOIN
+         TOK_TABREF
+            TOK_TABNAME
+               bucket_small
+            a
+         TOK_TABREF
+            TOK_TABNAME
+               bucket_big
+            b
+         =
+            .
+               TOK_TABLE_OR_COL
+                  a
+               key
+            .
+               TOK_TABLE_OR_COL
+                  b
+               key
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_FUNCTIONSTAR
+               count
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Map 1 <- Map 3 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+          Map Operator Tree:
+              TableScan
+                alias: b
+                Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE
+                GatherStats: false
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: key is not null (type: boolean)
+                  Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE
+                  Map Join Operator
+                    condition map:
+                         Inner Join 0 to 1
+                    condition expressions:
+                      0 
+                      1 
+                    Estimated key counts: Map 3 => 2
+                    keys:
+                      0 key (type: string)
+                      1 key (type: string)
+                    input vertices:
+                      0 Map 3
+                    Position of Big Table: 1
+                    Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE
+                    Select Operator
+                      Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE
+                      Group By Operator
+                        aggregations: count()
+                        mode: hash
+                        outputColumnNames: _col0
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                        Reduce Output Operator
+                          sort order: 
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
+                          tag: -1
+                          value expressions: _col0 (type: bigint)
+                          auto parallelism: false
+          Path -> Alias:
+#### A masked pattern was here ####
+          Path -> Partition:
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-08
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-08
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 2
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_big
+                numFiles 2
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_big { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 2750
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 2
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_big
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_big { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_big
+              name: default.bucket_big
+          Truncated Path -> Alias:
+            /bucket_big/ds=2008-04-08 [b]
+        Map 3 
+          Map Operator Tree:
+              TableScan
+                alias: a
+                Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE
+                GatherStats: false
+                Filter Operator
+                  isSamplingPred: false
+                  predicate: key is not null (type: boolean)
+                  Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: key (type: string)
+                    sort order: +
+                    Map-reduce partition columns: key (type: string)
+                    Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE
+                    tag: 0
+                    auto parallelism: true
+          Path -> Alias:
+#### A masked pattern was here ####
+          Path -> Partition:
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-08
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-08
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 4
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_small
+                numFiles 4
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_small { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 226
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 4
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_small
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_small { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_small
+              name: default.bucket_small
+#### A masked pattern was here ####
+            Partition
+              base file name: ds=2008-04-09
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              partition values:
+                ds 2008-04-09
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count 4
+                bucket_field_name key
+                columns key,value
+                columns.comments 
+                columns.types string:string
+#### A masked pattern was here ####
+                name default.bucket_small
+                numFiles 4
+                numRows 0
+                partition_columns ds
+                partition_columns.types string
+                rawDataSize 0
+                serialization.ddl struct bucket_small { string key, string value}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 226
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+                input format: org.apache.hadoop.mapred.TextInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                properties:
+                  SORTBUCKETCOLSPREFIX TRUE
+                  bucket_count 4
+                  bucket_field_name key
+                  columns key,value
+                  columns.comments 
+                  columns.types string:string
+#### A masked pattern was here ####
+                  name default.bucket_small
+                  partition_columns ds
+                  partition_columns.types string
+                  serialization.ddl struct bucket_small { string key, string value}
+                  serialization.format 1
+                  serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                name: default.bucket_small
+              name: default.bucket_small
+          Truncated Path -> Alias:
+            /bucket_small/ds=2008-04-08 [a]
+            /bucket_small/ds=2008-04-09 [a]
+        Reducer 2 
+          Needs Tagging: false
false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +38 +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . 
+ TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [b] + /bucket_small/ds=2008-04-09 [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 2 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [a] + 
Reducer 3 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +38 +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . 
+ TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [b] + /bucket_small/ds=2008-04-09 [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 2 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [a] + 
Reducer 3 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +38 diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_5.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_5.q.out new file mode 100644 index 0000000..d238592 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_5.q.out @@ -0,0 +1,821 @@ +PREHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket +CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_small +POSTHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket +CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_small +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small +POSTHOOK: query: load data local inpath 
'../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small +PREHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_big +POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_big +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter +explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter +explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_small + a + TOK_TABREF + TOK_TABNAME + bucket_big + b + = + . + TOK_TABLE_OR_COL + a + key + . 
+ TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 3 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 0 Map 3 + Position of Big Table: 1 + Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: bucket_big + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big [b] + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + 
Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + tag: 0 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: bucket_small + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small [a] + Reducer 2 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_small +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_small +#### A masked pattern was here #### +19 +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM 
bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: bucket_small + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE 
Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: bucket_big + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big [a] + Reducer 3 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_small +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: 
default@bucket_small +#### A masked pattern was here #### +19 +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 113 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: bucket_small + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 14 Data size: 1425 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 1 + keys: + 0 key (type: string) 
+ 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 15 Data size: 1567 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: bucket_big + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big [a] + Reducer 3 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: 
default@bucket_big +PREHOOK: Input: default@bucket_small +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_small +#### A masked pattern was here #### +19 diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_6.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_6.q.out new file mode 100644 index 0000000..c446dbf --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_6.q.out @@ -0,0 +1,1248 @@ +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl1 +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl1 +PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl2 +POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl2 +PREHOOK: query: CREATE TABLE tbl3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl3 +POSTHOOK: query: CREATE TABLE tbl3(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl3 +PREHOOK: query: CREATE TABLE tbl4(key int, value string) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl4 +POSTHOOK: query: CREATE TABLE tbl4(key int, value string) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl4 +PREHOOK: query: insert overwrite table tbl1 select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl1 +POSTHOOK: query: insert overwrite table tbl1 select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl1 +POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table tbl2 select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl2 +POSTHOOK: query: insert overwrite table tbl2 select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl2 +POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table tbl3 select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl3 +POSTHOOK: query: insert overwrite table tbl3 select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: 
default@tbl3 +POSTHOOK: Lineage: tbl3.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table tbl4 select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl4 +POSTHOOK: query: insert overwrite table tbl4 select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl4 +POSTHOOK: Lineage: tbl4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key + +-- Three tests below are all the same query with different alias, which changes dispatch order of GenMapRedWalker +-- This is dependent to iteration order of HashMap, so can be meaningless in non-sun jdk +-- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] +-- c = TS[1]-RS[7]-JOIN[8] +-- a = TS[2]-MAPJOIN[11] +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key + +-- Three tests below are all the same query with different alias, which changes dispatch order of GenMapRedWalker +-- This is dependent to iteration order of HashMap, so can be meaningless in non-sun jdk +-- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] +-- c = TS[1]-RS[7]-JOIN[8] +-- a = TS[2]-MAPJOIN[11] +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and value is not null) (type: boolean) + Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {value} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string) + sort order: + + Map-reduce 
partition columns: _col1 (type: string) + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +2654 +PREHOOK: query: -- d = TS[0]-RS[7]-JOIN[8]-SEL[9]-FS[10] +-- b = TS[1]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8] +-- a = TS[2]-MAPJOIN[11] +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value +PREHOOK: type: QUERY +POSTHOOK: query: -- d = TS[0]-RS[7]-JOIN[8]-SEL[9]-FS[10] +-- b = TS[1]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8] +-- a = TS[2]-MAPJOIN[11] +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: d + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) 
+ Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and value is not null) (type: boolean) + Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {value} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string) + sort order: + + Map-reduce partition columns: _col1 (type: string) + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +2654 +PREHOOK: query: -- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] +-- a = TS[1]-MAPJOIN[11] +-- h = TS[2]-RS[7]-JOIN[8] +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value +PREHOOK: type: QUERY +POSTHOOK: query: -- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] +-- a = TS[1]-MAPJOIN[11] +-- h = TS[2]-RS[7]-JOIN[8] +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked 
pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and value is not null) (type: boolean) + Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {value} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string) + sort order: + + Map-reduce partition columns: _col1 (type: string) + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: h + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +2654 
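A note on the shape of the plans above: tbl1 and tbl2 are bucketed and sorted on the join key, so Hive runs their join as a sort-merge-bucket (SMB) join: a single Merge Join Operator placed inside one map vertex (the nested Map Operator Trees for a and b), with no shuffle edge between the two scans. Only the follow-up join against the unbucketed src table forces a Reduce Output Operator on _col1 and a SIMPLE_EDGE into a reducer. Below is a minimal sketch of the kind of setup these golden files presuppose; the exact DDL lives earlier in the .q.out, and the bucket count and SET statements here are assumptions for illustration, not copied from this patch:

    -- assumed fixture DDL: both sides bucketed and sorted on the same
    -- key with matching bucket counts, which is what makes the pair
    -- eligible for a map-side sort-merge-bucket join
    CREATE TABLE tbl1 (key int, value string)
        CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    CREATE TABLE tbl2 (key int, value string)
        CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;

    -- Hive settings that enable the conversion (values assumed here)
    SET hive.auto.convert.sortmerge.join = true;
    SET hive.optimize.bucketmapjoin = true;
    SET hive.optimize.bucketmapjoin.sortedmerge = true;

    -- the a/b pair merges map-side; the join on src.value still shuffles
    EXPLAIN
    SELECT count(*)
    FROM tbl1 a
    JOIN tbl2 b ON a.key = b.key
    JOIN src  c ON c.value = a.value;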
+PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(key) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(key) (type: double) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(key) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(key) (type: double) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(key) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(key) (type: double) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 + 1 + 2 + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +2654 +PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 4 <- Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 + 1 + 2 + keys: + 0 key (type: int) + 1 key (type: int) + 2 key (type: int) + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl3 +#### A masked pattern was here #### +2654 +PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and value is not null) (type: boolean) + Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {value} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string) + sort order: + + Map-reduce partition columns: _col1 (type: string) + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num 
rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl4 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl4 +#### A masked pattern was here #### +2654 +PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and value is not null) (type: boolean) + Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {value} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 
(type: string) + sort order: + + Map-reduce partition columns: _col1 (type: string) + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +2654 +PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(key) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(key) (type: double) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + 
predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(key) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(key) (type: double) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(key) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(key) (type: double) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 + 1 + 2 + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +2654 +PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 4 <- Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 3 + Map 
Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 + 1 + 2 + keys: + 0 key (type: int) + 1 key (type: int) + 2 key (type: int) + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl3 +#### A masked pattern was here #### +2654 +PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +PREHOOK: type: QUERY +POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key +explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + 
Map 2 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and value is not null) (type: boolean) + Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {value} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string) + sort order: + + Map-reduce partition columns: _col1 (type: string) + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +PREHOOK: Input: default@tbl4 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +POSTHOOK: Input: default@tbl4 +#### A masked pattern was here #### +2654
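The new golden file that follows covers the asymmetric-bucket case: a small table with 2 partitions and 4 buckets joined to a big table with 2 partitions and 2 buckets. As the inline comment in the file notes, the big side is chosen by size rather than by join order, so "small JOIN big" and "big JOIN small" produce the same plan: the small table's scan is shipped over a BROADCAST_EDGE into a Map Join Operator running on the big table's map vertex ("Position of Big Table" marks which input stays streamed). A rough sketch of the size-based conversion knobs involved; these are real Hive settings, but the threshold value is an assumption, not taken from this patch:

    -- let the optimizer pick map joins automatically and choose the
    -- big table by estimated size instead of by position in the query
    SET hive.auto.convert.join = true;
    SET hive.auto.convert.join.noconditionaltask = true;
    SET hive.auto.convert.join.noconditionaltask.size = 10000000;  -- assumed threshold, in bytes

diff --git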
a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_7.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_7.q.out new file mode 100644 index 0000000..424f191 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_7.q.out @@ -0,0 +1,1215 @@ +PREHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket +CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_small +POSTHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket +CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_small +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small +POSTHOOK: Output: default@bucket_small@ds=2008-04-09 +PREHOOK: query: load data local inpath 
'../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket3outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-09 +PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_small@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket4outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_small@ds=2008-04-09 +PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_big +POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_big +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: load data local 
inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter +explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter +explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_small + a + TOK_TABREF + TOK_TABNAME + bucket_big + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 54 Data size: 5500 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 3 => 2 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 0 Map 3 + Position of Big Table: 1 + Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [b] + /bucket_big/ds=2008-04-09 [b] + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + tag: 0 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + 
rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [a] + /bucket_small/ds=2008-04-09 [a] + Reducer 2 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + 
hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +76 +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [b] + /bucket_small/ds=2008-04-09 [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 54 Data size: 5500 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 2 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> 
Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [a] + /bucket_big/ds=2008-04-09 [a] + Reducer 3 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: 
Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +76 +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . 
+ TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 4 Data size: 452 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 2 Data size: 226 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 226 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [b] + /bucket_small/ds=2008-04-09 [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 54 Data size: 5500 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 27 Data size: 2750 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 2 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 29 Data size: 3025 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: 
ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 2750 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [a] + /bucket_big/ds=2008-04-09 [a] + Reducer 3 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 
+POSTHOOK: Input: default@bucket_small@ds=2008-04-09
+#### A masked pattern was here ####
+76
diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_8.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_8.q.out
new file mode 100644
index 0000000..a2d8e0f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_8.q.out
@@ -0,0 +1,1217 @@
+PREHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket
+CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_small@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/smallsrcsortbucket2outof4.txt' INTO TABLE bucket_small partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_small@ds=2008-04-09
+PREHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: CREATE TABLE bucket_big (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-08
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-08')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-08
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket1outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@bucket_big@ds=2008-04-09
+POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket3outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09')
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@bucket_big@ds=2008-04-09
+PREHOOK:
query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@bucket_big@ds=2008-04-09 +POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.txt' INTO TABLE bucket_big partition(ds='2008-04-09') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@bucket_big@ds=2008-04-09 +PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter +explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter +explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_small + a + TOK_TABREF + TOK_TABNAME + bucket_big + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 3 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 0 Map 3 + Position of Big Table: 1 + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + 
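The recorded comment above ("Since size is being used to find the big table, the order of the tables in the join does not matter") describes the behavior these plans verify: the optimizer picks the streamed side of the join by table size, not by the order the tables appear in the FROM clause, which is why both orderings of bucket_small and bucket_big produce the same plan shape and the same count of 76. A minimal HiveQL sketch of the kind of session settings the auto_sortmerge_join q-tests depend on follows; the .q headers themselves are not part of this diff, so the values below are illustrative assumptions rather than the tests' literal settings:

set hive.optimize.bucketmapjoin=true;
set hive.optimize.bucketmapjoin.sortedmerge=true;
set hive.auto.convert.sortmerge.join=true;
-- illustrative assumption: the big (streamed) side is chosen by a size-based selection policy
set hive.auto.convert.sortmerge.join.bigtable.selection.policy=org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ;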
input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [b] + /bucket_big/ds=2008-04-09 [b] + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 0 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types 
string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [a] + /bucket_small/ds=2008-04-09 [a] + Reducer 2 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + 
hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +76 +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . + TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [b] + /bucket_small/ds=2008-04-09 [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> 
Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [a] + /bucket_big/ds=2008-04-09 [a] + Reducer 3 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: 
Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +76 +PREHOOK: query: -- The mapjoin should fail resulting in the sort-merge join +explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The mapjoin should fail resulting in the sort-merge join +explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_JOIN + TOK_TABREF + TOK_TABNAME + bucket_big + a + TOK_TABREF + TOK_TABNAME + bucket_small + b + = + . + TOK_TABLE_OR_COL + a + key + . 
+ TOK_TABLE_OR_COL + b + key + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTIONSTAR + count + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 2 Data size: 228 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + tag: 1 + auto parallelism: true + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + numFiles 2 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 114 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 2 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_small + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_small { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_small + name: default.bucket_small + Truncated Path -> Alias: + /bucket_small/ds=2008-04-08 [b] + /bucket_small/ds=2008-04-09 [b] + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Filter Operator + isSamplingPred: false + predicate: key is not null (type: boolean) + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Estimated key counts: Map 1 => 1 + keys: + 0 key (type: string) + 1 key (type: string) + input vertices: + 1 Map 1 + Position of Big Table: 0 + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + tag: -1 + value expressions: _col0 (type: bigint) + auto parallelism: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: ds=2008-04-08 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-08 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big +#### A masked pattern was here #### + Partition + base file 
name: ds=2008-04-09 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + partition values: + ds 2008-04-09 + properties: + COLUMN_STATS_ACCURATE true + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + numFiles 4 + numRows 0 + partition_columns ds + partition_columns.types string + rawDataSize 0 + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 5812 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + SORTBUCKETCOLSPREFIX TRUE + bucket_count 4 + bucket_field_name key + columns key,value + columns.comments + columns.types string:string +#### A masked pattern was here #### + name default.bucket_big + partition_columns ds + partition_columns.types string + serialization.ddl struct bucket_big { string key, string value} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.bucket_big + name: default.bucket_big + Truncated Path -> Alias: + /bucket_big/ds=2008-04-08 [a] + /bucket_big/ds=2008-04-09 [a] + Reducer 3 + Needs Tagging: false + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types bigint + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_big +PREHOOK: Input: default@bucket_big@ds=2008-04-08 +PREHOOK: Input: default@bucket_big@ds=2008-04-09 +PREHOOK: Input: default@bucket_small +PREHOOK: Input: default@bucket_small@ds=2008-04-08 +PREHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_big +POSTHOOK: Input: default@bucket_big@ds=2008-04-08 +POSTHOOK: Input: default@bucket_big@ds=2008-04-09 +POSTHOOK: Input: default@bucket_small +POSTHOOK: Input: 
default@bucket_small@ds=2008-04-08 +POSTHOOK: Input: default@bucket_small@ds=2008-04-09 +#### A masked pattern was here #### +76 diff --git a/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_9.q.out b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_9.q.out new file mode 100644 index 0000000..571ea0a --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/auto_sortmerge_join_9.q.out @@ -0,0 +1,3674 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl1 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl1 +PREHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl2 +POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl2 +PREHOOK: query: insert overwrite table tbl1 +select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl1 +POSTHOOK: query: insert overwrite table tbl1 +select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl1 +POSTHOOK: Lineage: tbl1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table tbl2 +select * from src where key < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@tbl2 +POSTHOOK: query: insert overwrite table tbl2 +select * from src where key < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@tbl2 +POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join +explain +select count(*) from ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +PREHOOK: type: QUERY +POSTHOOK: query: -- The join is being performed as part of sub-query. 
It should be converted to a sort-merge join +explain +select count(*) from ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + input vertices: + 1 Map 3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +22 +PREHOOK: query: -- The join is being performed as part of sub-query. 
It should be converted to a sort-merge join +explain +select key, count(*) from +( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +group by key +PREHOOK: type: QUERY +POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join +explain +select key, count(*) from +( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +group by key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select key, count(*) from +( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here 
#### +POSTHOOK: query: select key, count(*) from +( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +0 9 +2 1 +4 1 +5 9 +8 1 +9 1 +PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join +explain +select count(*) from +( + select key, count(*) from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 + group by key +) subq2 +PREHOOK: type: QUERY +POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join +explain +select count(*) from +( + select key, count(*) from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 + group by key +) subq2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 
16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from +( + select key, count(*) from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 + group by key +) subq2 +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from +( + select key, count(*) from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 + group by key +) subq2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +6 +PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. +-- Each sub-query should be converted to a sort-merge join. +explain +select src1.key, src1.cnt1, src2.cnt1 from +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 group by key +) src1 +join +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq2 group by key +) src2 +on src1.key = src2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. +-- Each sub-query should be converted to a sort-merge join. 
+explain +select src1.key, src1.cnt1, src2.cnt1 from +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 group by key +) src1 +join +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq2 group by key +) src2 +on src1.key = src2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Map 4 <- Map 6 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE), Reducer 5 (BROADCAST_EDGE) + Reducer 5 <- Map 4 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 6 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE 
Column stats: NONE + value expressions: _col1 (type: bigint) + Map 6 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {_col1} + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col3 + input vertices: + 1 Reducer 5 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 5 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 group by key +) src1 +join +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq2 group by key +) src2 +on src1.key = src2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 group by key +) src1 +join +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = 
b.key + ) subq2 group by key +) src2 +on src1.key = src2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +0 9 9 +2 1 1 +4 1 1 +5 9 9 +8 1 1 +9 1 1 +PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join. +explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join. +explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 3 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: 
NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join, although there is more than one level of sub-query +explain +select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join tbl2 b + on subq2.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join, although there is more than one level of sub-query +explain +select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join tbl2 b + on subq2.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 key (type: int) + input vertices: + 0 Map 3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: 
int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join tbl2 b + on subq2.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join tbl2 b + on subq2.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- Both the tables are nested sub-queries i.e. more than 1 level of sub-query. +-- The join should be converted to a sort-merge join +explain +select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq3 + where key < 6 + ) subq4 + on subq2.key = subq4.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Both the tables are nested sub-queries i.e. more than 1 level of sub-query.
+-- The join should be converted to a sort-merge join +explain +select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq3 + where key < 6 + ) subq4 + on subq2.key = subq4.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 1 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq3 + where key < 6 + ) subq4 + on subq2.key = 
subq4.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq3 + where key < 6 + ) subq4 + on subq2.key = subq4.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +#### A masked pattern was here #### +20 +PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key +-- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one +-- item, but that is not part of the join key. +explain +select count(*) from + (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + join + (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key +-- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one +-- item, but that is not part of the join key. +explain +select count(*) from + (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + join + (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 8) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 3 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 8) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce 
partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + join + (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + join + (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join nor bucketized mapside +-- join should be performed +explain +select count(*) from + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + join + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join nor bucketized mapside +-- join should be performed +explain +select count(*) from + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + join + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key + 1) (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: _col0 is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + 
outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (key + 1) (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: _col0 is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + join + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 + join + (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +22 +PREHOOK: query: -- The left table is a sub-query and the right table is not. +-- It should be converted to a sort-merge join. +explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join tbl2 a on subq1.key = a.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The left table is a sub-query and the right table is not. +-- It should be converted to a sort-merge join. 
+explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join tbl2 a on subq1.key = a.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 key (type: int) + input vertices: + 0 Map 1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join tbl2 a on subq1.key = a.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join tbl2 a on subq1.key = a.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- The right table is a sub-query and the left table is not. +-- It should be converted to a sort-merge join. 
+explain +select count(*) from tbl1 a + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + on a.key = subq1.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The right table is a sub-query and the left table is not. +-- It should be converted to a sort-merge join. +explain +select count(*) from tbl1 a + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + on a.key = subq1.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from tbl1 a + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + on a.key = subq1.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from tbl1 a + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + on a.key = subq1.key +POSTHOOK: type: QUERY +POSTHOOK: 
Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. +-- It should be converted to a sort-merge join +explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on (subq1.key = subq2.key) + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + on (subq1.key = subq3.key) +PREHOOK: type: QUERY +POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. +-- It should be converted to a sort-merge join +explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on (subq1.key = subq2.key) + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + on (subq1.key = subq3.key) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 + 1 + 2 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + 2 _col0 (type: int) + input vertices: + 1 Map 4 + 2 Map 1 + Statistics: Num rows: 4 Data size: 30 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 4 Data size: 30 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + 
outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + on (subq1.key = subq3.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + on (subq1.key = subq3.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +56 +PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. +-- The join should be converted to a sort-merge join +explain +select count(*) from ( + select subq2.key as key, subq2.value as value1, b.value as value2 from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 +join tbl2 b +on subq2.key = b.key) a +PREHOOK: type: QUERY +POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. 
+-- The join should be converted to a sort-merge join +explain +select count(*) from ( + select subq2.key as key, subq2.value as value1, b.value as value2 from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 +join tbl2 b +on subq2.key = b.key) a +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 key (type: int) + input vertices: + 0 Map 3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from ( + select subq2.key as key, subq2.value as value1, b.value as value2 from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 +join tbl2 b +on subq2.key = b.key) a +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from ( + select subq2.key as key, subq2.value as value1, b.value as value2 from + ( + select * from + ( + select a.key 
as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 +join tbl2 b +on subq2.key = b.key) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- The join is being performed as part of a sub-query. It should be converted to a sort-merge join +explain +select count(*) from ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +PREHOOK: type: QUERY +POSTHOOK: query: -- The join is being performed as part of a sub-query. It should be converted to a sort-merge join +explain +select count(*) from ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + input vertices: + 1 Map 3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: 
select count(*) from ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +22 +PREHOOK: query: -- The join is being performed as part of a sub-query. It should be converted to a sort-merge join +explain +select key, count(*) from +( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +group by key +PREHOOK: type: QUERY +POSTHOOK: query: -- The join is being performed as part of a sub-query. It should be converted to a sort-merge join +explain +select key, count(*) from +( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +group by key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + 
limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select key, count(*) from +( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select key, count(*) from +( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +) subq1 +group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +0 9 +2 1 +4 1 +5 9 +8 1 +9 1 +PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join +explain +select count(*) from +( + select key, count(*) from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 + group by key +) subq2 +PREHOOK: type: QUERY +POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join +explain +select count(*) from +( + select key, count(*) from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 + group by key +) subq2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator 
+ Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from +( + select key, count(*) from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 + group by key +) subq2 +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from +( + select key, count(*) from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 + group by key +) subq2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +6 +PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. +-- Each sub-query should be converted to a sort-merge join. +explain +select src1.key, src1.cnt1, src2.cnt1 from +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 group by key +) src1 +join +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq2 group by key +) src2 +on src1.key = src2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. +-- Each sub-query should be converted to a sort-merge join. 
+explain +select src1.key, src1.cnt1, src2.cnt1 from +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 group by key +) src1 +join +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq2 group by key +) src2 +on src1.key = src2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Map 4 <- Map 6 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE), Reducer 5 (BROADCAST_EDGE) + Reducer 5 <- Map 4 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 6 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE 
Column stats: NONE + value expressions: _col1 (type: bigint) + Map 6 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col0} {_col1} + 1 {_col1} + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col3 + input vertices: + 1 Reducer 5 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 5 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 group by key +) src1 +join +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq2 group by key +) src2 +on src1.key = src2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select src1.key, src1.cnt1, src2.cnt1 from +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key + ) subq1 group by key +) src1 +join +( + select key, count(*) as cnt1 from + ( + select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = 
b.key + ) subq2 group by key +) src2 +on src1.key = src2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +0 9 9 +2 1 1 +4 1 1 +5 9 9 +8 1 1 +9 1 1 +PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join. +explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join. +explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 3 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: 
NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join, although there is more than one level of sub-query +explain +select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join tbl2 b + on subq2.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should +-- be converted to a sort-merge join, although there is more than one level of sub-query +explain +select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join tbl2 b + on subq2.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 key (type: int) + input vertices: + 0 Map 3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: 
int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join tbl2 b + on subq2.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join tbl2 b + on subq2.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- Both the tables are nested sub-queries i.e. more than 1 level of sub-query. +-- The join should be converted to a sort-merge join +explain +select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq3 + where key < 6 + ) subq4 + on subq2.key = subq4.key +PREHOOK: type: QUERY +POSTHOOK: query: -- Both the tables are nested sub-queries i.e. more than 1 level of sub-query. 
+-- The join should be converted to a sort-merge join +explain +select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq3 + where key < 6 + ) subq4 + on subq2.key = subq4.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 1 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq3 + where key < 6 + ) subq4 + on subq2.key = 
subq4.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 + join + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq3 + where key < 6 + ) subq4 + on subq2.key = subq4.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +#### A masked pattern was here #### +20 +PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key +-- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one +-- item, but that is not part of the join key. +explain +select count(*) from + (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + join + (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key +-- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one +-- item, but that is not part of the join key. +explain +select count(*) from + (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + join + (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 8) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 3 + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 8) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce 
partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + join + (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + on subq1.key = subq2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 + join + (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 + on subq1.key = subq2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- The left table is a sub-query and the right table is not. +-- It should be converted to a sort-merge join. +explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join tbl2 a on subq1.key = a.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The left table is a sub-query and the right table is not. +-- It should be converted to a sort-merge join. 
+explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join tbl2 a on subq1.key = a.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 key (type: int) + input vertices: + 0 Map 1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join tbl2 a on subq1.key = a.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join tbl2 a on subq1.key = a.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- The right table is a sub-query and the left table is not. +-- It should be converted to a sort-merge join. 
+explain +select count(*) from tbl1 a + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + on a.key = subq1.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The right table is a sub-query and the left table is not. +-- It should be converted to a sort-merge join. +explain +select count(*) from tbl1 a + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + on a.key = subq1.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 _col0 (type: int) + input vertices: + 1 Map 1 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from tbl1 a + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + on a.key = subq1.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from tbl1 a + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 + on a.key = subq1.key +POSTHOOK: type: QUERY +POSTHOOK: 
Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 +PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. +-- It should be converted to a sort-merge join +explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on (subq1.key = subq2.key) + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + on (subq1.key = subq3.key) +PREHOOK: type: QUERY +POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. +-- It should be converted to a sort-merge join +explain +select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on (subq1.key = subq2.key) + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + on (subq1.key = subq3.key) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 + 1 + 2 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + 2 _col0 (type: int) + input vertices: + 1 Map 4 + 2 Map 1 + Statistics: Num rows: 4 Data size: 30 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 4 Data size: 30 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key < 6) and key is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + 
outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 14 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + on (subq1.key = subq3.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from + (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 + on subq1.key = subq2.key + join + (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 + on (subq1.key = subq3.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +56 +PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. +-- The join should be converted to a sort-merge join +explain +select count(*) from ( + select subq2.key as key, subq2.value as value1, b.value as value2 from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 +join tbl2 b +on subq2.key = b.key) a +PREHOOK: type: QUERY +POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. 
+-- The join should be converted to a sort-merge join +explain +select count(*) from ( + select subq2.key as key, subq2.value as value1, b.value as value2 from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 +join tbl2 b +on subq2.key = b.key) a +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 5 Data size: 35 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 key (type: int) + input vertices: + 0 Map 3 + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 10 Data size: 70 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((key < 8) and (key < 6)) and key is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from ( + select subq2.key as key, subq2.value as value1, b.value as value2 from + ( + select * from + ( + select a.key as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 +join tbl2 b +on subq2.key = b.key) a +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl1 +PREHOOK: Input: default@tbl2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from ( + select subq2.key as key, subq2.value as value1, b.value as value2 from + ( + select * from + ( + select a.key 
as key, a.value as value from tbl1 a where key < 8 + ) subq1 + where key < 6 + ) subq2 +join tbl2 b +on subq2.key = b.key) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl1 +POSTHOOK: Input: default@tbl2 +#### A masked pattern was here #### +20 diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out index b3f16c1..512349d 100644 --- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out @@ -145,6 +145,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col7 + input vertices: + 0 Map 2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) @@ -222,6 +224,8 @@ STAGE PLANS: 0 _col1 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 0 Reducer 3 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string) @@ -337,6 +341,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col6, _col7 + input vertices: + 1 Map 4 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col6 (type: int), _col7 (type: string) @@ -390,6 +396,8 @@ STAGE PLANS: 0 _col1 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 1 Map 1 Statistics: Num rows: 150 Data size: 1600 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string) @@ -450,6 +458,8 @@ STAGE PLANS: 0 _col1 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 0 Reducer 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: int), _col0 (type: double), _col3 (type: string) @@ -480,6 +490,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 1 Map 4 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string) @@ -604,6 +616,9 @@ STAGE PLANS: 1 key (type: int) 2 key (type: int) outputColumnNames: _col0, _col1, _col7 + input vertices: + 1 Map 1 + 2 Map 2 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) @@ -674,6 +689,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 1 Map 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string) @@ -689,6 +706,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 1 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) @@ -763,6 +782,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 0 Reducer 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 
_col0 (type: int), _col1 (type: double), _col3 (type: string) @@ -864,6 +885,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 0 Reducer 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: double), _col3 (type: string) @@ -958,6 +981,8 @@ STAGE PLANS: 0 value (type: string) 1 value (type: string) outputColumnNames: _col0, _col1, _col7 + input vertices: + 0 Map 2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) @@ -1050,6 +1075,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1, _col6 + input vertices: + 0 Map 2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) @@ -1130,6 +1157,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 0 Map 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -1141,6 +1170,8 @@ STAGE PLANS: 0 _col1 (type: string) 1 value (type: string) outputColumnNames: _col0, _col12 + input vertices: + 1 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col12 (type: int) diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out index 6804f91..a538839 100644 --- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out +++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out @@ -155,6 +155,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 0 Map 3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -166,6 +168,8 @@ STAGE PLANS: 0 _col1 (type: string) 1 value (type: string) outputColumnNames: _col0, _col12 + input vertices: + 1 Map 1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col12 (type: int) @@ -258,6 +262,8 @@ STAGE PLANS: 0 UDFToDouble(key) (type: double) 1 UDFToDouble(key) (type: double) outputColumnNames: _col0, _col1, _col6 + input vertices: + 0 Map 2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) @@ -330,6 +336,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1 + input vertices: + 1 Map 2 Statistics: Num rows: 91 Data size: 969 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int) @@ -405,6 +413,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1 + input vertices: + 1 Map 2 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int) @@ -497,6 +507,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1 + input vertices: + 0 Map 1 Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: 
_col0 (type: int), _col1 (type: int) @@ -550,6 +562,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 0 Reducer 3 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: int) @@ -641,6 +655,8 @@ STAGE PLANS: 0 UDFToDouble(_col0) (type: double) 1 UDFToDouble(key) (type: double) outputColumnNames: _col0, _col2 + input vertices: + 0 Reducer 3 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col2 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out b/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out new file mode 100644 index 0000000..5920612 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/cbo_correctness.q.out @@ -0,0 +1,18962 @@ +PREHOOK: query: drop table if exists t1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists t2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists t3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists t3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: create table t1(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 +POSTHOOK: query: create table t2(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t2 +PREHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t3 +POSTHOOK: query: create table t3(key string, value string, c_int int, c_float float, c_boolean boolean) row format delimited fields terminated by ',' STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t3 +PREHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t1 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t1.txt' into table t1 partition (dt='2014') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1@dt=2014 +PREHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014') 
+PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t2 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t2.txt' into table t2 partition (dt='2014') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2@dt=2014 +PREHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@t3 +POSTHOOK: query: load data local inpath '../../data/files/cbo_t3.txt' into table t3 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@t3 +PREHOOK: query: CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@part +POSTHOOK: query: CREATE TABLE part( + p_partkey INT, + p_name STRING, + p_mfgr STRING, + p_brand STRING, + p_type STRING, + p_size INT, + p_container STRING, + p_retailprice DOUBLE, + p_comment STRING +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@part +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@part +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwrite into table part +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@part +PREHOOK: query: DROP TABLE lineitem +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE lineitem +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@lineitem +POSTHOOK: query: CREATE TABLE lineitem (L_ORDERKEY INT, + L_PARTKEY INT, + L_SUPPKEY INT, + L_LINENUMBER INT, + L_QUANTITY DOUBLE, + L_EXTENDEDPRICE DOUBLE, + L_DISCOUNT DOUBLE, + L_TAX DOUBLE, + L_RETURNFLAG STRING, + L_LINESTATUS STRING, + l_shipdate STRING, + L_COMMITDATE STRING, + L_RECEIPTDATE STRING, + L_SHIPINSTRUCT STRING, + L_SHIPMODE STRING, + L_COMMENT STRING) +ROW FORMAT DELIMITED +FIELDS TERMINATED BY '|' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@lineitem +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@lineitem +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/lineitem.txt' OVERWRITE INTO TABLE lineitem +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@lineitem +PREHOOK: query: create table src_cbo as select * from src +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@src +PREHOOK: Output: database:default +PREHOOK: Output: default@src_cbo +POSTHOOK: query: create table src_cbo as select * from src +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@src +POSTHOOK: Output: 
database:default +POSTHOOK: Output: default@src_cbo +PREHOOK: query: analyze table t1 partition (dt) compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Output: default@t1 +PREHOOK: Output: default@t1@dt=2014 +POSTHOOK: query: analyze table t1 partition (dt) compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Output: default@t1 +POSTHOOK: Output: default@t1@dt=2014 +PREHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t1 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +PREHOOK: query: analyze table t2 partition (dt) compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Output: default@t2 +PREHOOK: Output: default@t2@dt=2014 +POSTHOOK: query: analyze table t2 partition (dt) compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Output: default@t2 +POSTHOOK: Output: default@t2@dt=2014 +PREHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t2 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +PREHOOK: query: analyze table t3 compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +PREHOOK: Output: default@t3 +POSTHOOK: query: analyze table t3 compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@t3 +PREHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: analyze table t3 compute statistics for columns key, value, c_int, c_float, c_boolean +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +PREHOOK: query: analyze table src_cbo compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +PREHOOK: Output: default@src_cbo +POSTHOOK: query: analyze table src_cbo compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +POSTHOOK: Output: default@src_cbo +PREHOOK: query: analyze table src_cbo compute statistics for columns +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: analyze table src_cbo compute statistics for columns +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +PREHOOK: query: analyze table part compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@part +PREHOOK: Output: default@part +POSTHOOK: query: analyze table part compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +POSTHOOK: Output: default@part +PREHOOK: query: analyze table part compute statistics for columns +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern 
was here #### +POSTHOOK: query: analyze table part compute statistics for columns +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +PREHOOK: query: analyze table lineitem compute statistics +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +PREHOOK: Output: default@lineitem +POSTHOOK: query: analyze table lineitem compute statistics +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +POSTHOOK: Output: default@lineitem +PREHOOK: query: analyze table lineitem compute statistics for columns +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: analyze table lineitem compute statistics for columns +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +PREHOOK: query: -- 1. Test Select + TS +select * from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 1. Test Select + TS +select * from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select * from t1 as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select * from t1 as t2 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: 
default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +null NULL NULL +null NULL NULL +PREHOOK: query: -- 2. Test Select + TS + FIL +select * from t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 2. Test Select + TS + FIL +select * from t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: -- 3 Test Select + Select + TS + FIL 
+select * from (select * from t1 where t1.c_int >= 0) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 3 Test Select + Select + TS + FIL +select * from (select * from t1 where t1.c_int >= 0) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A 
masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select * from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +PREHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t2.key as x, c_int as c_int, (((c_int+c_float)*10)+5) as y from t1 as t2 where t2.c_int >= 0 and c_float+c_int >= 0 or c_float <= 100) as t1 where t1.c_int >= 0 and y+c_int >= 0 or x <= 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 + 1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +1 1 25.0 +PREHOOK: query: 
select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t1 where t1.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t2.c_int+c_float as x , c_int as c_int, (((c_int+c_float)*10)+5) as y from (select * from t1 where t1.c_int >= 0) as t2 where t2.c_int >= 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +2.0 1 25.0 +PREHOOK: query: -- 4. Test Select + Join + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 4. 
Test Select + Join + TS +select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.key from t1 join t3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key from t1 join t3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +null +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select t1.key from t1 join t3 where t1.key=t3.key and t1.key >= 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: 
default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL 2 +NULL NULL +NULL NULL +NULL NULL +NULL NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, 
t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 
1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 
1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select a, t1.b, key, t2.c_int, t3.p from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=key join (select key as p, c_int as q, t3.c_float as r from t3)t3 on t1.a=t3.p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 
+ 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 + 1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 
1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +1 1 1 1 1 +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +null NULL null NULL null +PREHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.c_int, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 
1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 
1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +1 1.0 1 1 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +PREHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select t3.c_int, b, t2.c_int, t1.c from (select key as a, c_int as b, t1.c_float as c from t1) t1 join t2 on t1.a=t2.key join t3 on t1.a=t3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 
1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 
1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +1 1 1 1.0 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 
+1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p left outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 
1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 
1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 
1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p right outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 
+1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 
+1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +NULL NULL NULL NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +null NULL null NULL +PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.a=p join t3 on t1.a=key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 
1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 
1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +1 1.0 1 1 1 +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +NULL NULL null NULL NULL +PREHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key, t1.c_int, t2.p, q from t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2) t2 on t1.key=p full outer join (select key as a, c_int as b, t3.c_float as c from t3)t3 on t1.key=a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 + 1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 
+1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+NULL NULL NULL NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+null NULL null NULL
+PREHOOK: query: -- 5. Test Select + Join + FIL + TS
+select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 5. Test Select + Join + FIL + TS
+select t1.c_int, t2.c_int from t1 join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 ####
+PREHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 left outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 ####
+PREHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 right outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 ####
+PREHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select t1.c_int, t2.c_int from t1 full outer join t2 on t1.key=t2.key where (t1.c_int + t2.c_int == 2) and (t1.c_int > 0 or t2.c_float >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 ####
+PREHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select b, t1.c, t2.p, q, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or t2.q >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
#### identical result rows elided: 1 1.0 1 1 1 ####
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 1 1.0 1 ####
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 1 1.0 1 ####
+PREHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 1 1.0 1 ####
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 1 1.0 1 ####
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 1 1.0 1 ####
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 1 1.0 1 ####
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
#### identical result rows elided: 1 1 1 1.0 1 ####
+1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 1 1.0 1 +1 1 
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... "+1 1 1 1.0 1" repeated many more times ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... "+1 1 1 1.0 1" repeated many more times ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 right outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... "+1 1 1 1.0 1" repeated many more times ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... "+1 1 1 1.0 1" repeated many more times ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... "+1 1 1 1.0 1" repeated many more times ...]
+PREHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select q, b, t2.p, t1.c, t3.c_int from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 full outer join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q == 2) and (b > 0 or c_int >= 0)) R where (q + 1 = 2) and (R.b > 0 or c_int >= 0)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 1 1 1.0 1
[... "+1 1 1 1.0 1" repeated many more times ...]
+PREHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having
+select * from t1 group by c_int
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: -- 6. Test Select + TS + Join + Fil + GB + GB Having
+select * from t1 group by c_int
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+NULL
+1
+PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+null NULL NULL
+ 1 4 2
+ 1 4 2
+1 4 12
+1 4 2
+PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+#### A masked pattern was here ####
+NULL NULL
+2 5.0
+12 5.0
+PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+PREHOOK: Input: default@t1@dt=2014
+PREHOOK: Input: default@t2
+PREHOOK: Input: default@t2@dt=2014
+PREHOOK: Input: default@t3
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+POSTHOOK: Input: default@t1@dt=2014
+POSTHOOK: Input: default@t2
+POSTHOOK: Input: default@t2@dt=2014
+POSTHOOK: Input: default@t3
+#### A masked pattern was here ####
+1 12
+1 2
+PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc
default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b+c, a desc) t1 right outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p right outer join t3 on t1.a=key where (b + t2.q >= 2) and (b > 0 or c_int >= 0) group by t3.c_int, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 
0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by c+a desc) t1 full outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by p+q desc, r asc) t2 on t1.a=p full outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 2 +1 12 +PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from t1 group by c_int limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 7. 
Test Select + TS + Join + Fil + GB + GB Having + Limit +select * from t1 group by c_int limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +NULL +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from t1 group by c_float, t1.c_int, key order by x limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +null NULL NULL +PREHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key, (c_int+c_float+1+2) as x, sum(c_int) as y from t1 group by c_float, t1.c_int, key) R group by y, x order by x,y limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +NULL NULL +PREHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key from(select key from (select key from t1 limit 5)t2 limit 5)t3 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +PREHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select key, c_int from(select key, c_int from (select key, c_int from t1 order by c_int limit 5)t1 order by c_int limit 5)t2 order by c_int limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +null NULL +null NULL +1 1 +1 1 +1 1 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key order by a limit 5) t1 join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key order by q/10 desc, r asc limit 5) t2 on 
t1.a=p join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c order by t3.c_int+c desc, c limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by b % c asc, b desc limit 5) t1 left outer join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 limit 5) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t2.q >= 0) and (b > 0 or c_int >= 0) group by t3.c_int, c having t3.c_int > 0 and (c_int >=1 or c >= 1) and (c_int + c) >= 0 order by t3.c_int % c asc, t3.c_int desc limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 12 +1 2 +PREHOOK: query: -- 8. Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 8. 
Test UDF/UDAF +select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +20 18 18 1.0 1 1 +PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int), case c_int when 0 then 1 when 1 then 2 else 3 end, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) from t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2 0 NULL NULL NULL NULL 3 6 +18 18 18 1.0 1 1 2 36 +PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +20 1 18 1.0 1 1 +PREHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(*) as a, count(distinct c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f, case c_int when 0 then 1 when 1 then 2 else 3 end as g, sum(case c_int when 0 then 1 when 1 then 2 else 3 end) as h from t1 group by c_int) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2 0 NULL NULL NULL NULL 3 6 +18 1 18 1.0 1 1 2 36 +PREHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(c_int) as b, sum(c_int) as c, avg(c_int) as d, max(c_int) as e, min(c_int) as f from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 20 1 18 +PREHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, min(distinct c_int) as f from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select f,a,e,b from (select count(*) as a, count(distinct c_int) as b, sum(distinct c_int) as c, avg(distinct c_int) as d, max(distinct c_int) as e, 
min(distinct c_int) as f from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 20 1 1 +PREHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) as a, avg(c_float), key from t1 group by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +2 1.0 1 +2 1.0 1 +12 1.0 1 +2 1.0 1 +0 NULL null +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +0 NULL +1 1.0 +PREHOOK: query: -- 9. Test Windowing Functions +select count(c_int) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 9. 
Test Windowing Functions +select count(c_int) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 18.0 1 1 1 1 1 0.0 1 NULL +18 18.0 1 1 2 1 1 0.0 1 NULL +18 18.0 1 1 3 1 1 0.0 1 NULL +18 18.0 1 1 4 1 1 0.0 1 NULL +18 18.0 1 1 5 1 1 0.0 1 1.0 +18 18.0 1 1 6 1 1 0.0 1 1.0 +18 18.0 1 1 7 1 1 0.0 1 1.0 +18 18.0 1 1 8 1 1 0.0 1 1.0 +18 18.0 1 1 9 1 1 0.0 1 1.0 +18 18.0 1 1 10 1 1 0.0 1 1.0 +18 18.0 1 1 11 1 1 0.0 1 1.0 +18 18.0 1 1 12 1 1 0.0 1 1.0 +18 18.0 1 1 13 1 1 0.0 1 1.0 +18 18.0 1 1 14 1 1 0.0 1 1.0 +18 18.0 1 1 15 1 1 0.0 1 1.0 +18 18.0 1 1 16 1 1 0.0 1 1.0 +18 18.0 1 1 17 1 1 0.0 1 1.0 +18 18.0 1 1 18 1 1 0.0 1 1.0 +18 18.0 1 1 19 1 1 0.0 1 1.0 +18 18.0 1 1 20 1 1 0.0 1 1.0 +PREHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(c_int) over(), sum(c_float) over(), max(c_int) over(), min(c_int) over(), row_number() over(), rank() over(), dense_rank() over(), percent_rank() over(), lead(c_int, 2, c_int) over(), lag(c_float, 2, c_float) over() from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 18.0 1 1 1 1 1 0.0 1 NULL +18 18.0 1 1 2 1 1 0.0 1 NULL +18 18.0 1 1 3 1 1 0.0 1 NULL +18 18.0 1 1 4 1 1 0.0 1 NULL +18 18.0 1 1 5 1 1 0.0 1 1.0 +18 18.0 1 1 6 1 1 0.0 1 1.0 +18 18.0 1 1 7 1 1 0.0 1 1.0 +18 18.0 1 1 8 1 1 0.0 1 1.0 +18 18.0 1 1 9 1 1 0.0 1 1.0 +18 18.0 1 1 10 1 1 0.0 1 1.0 +18 18.0 1 1 11 1 1 0.0 1 1.0 +18 18.0 1 1 12 1 1 0.0 1 1.0 +18 18.0 1 1 13 1 1 0.0 1 1.0 +18 18.0 1 1 14 1 1 0.0 1 1.0 +18 18.0 1 1 15 1 1 0.0 1 1.0 +18 18.0 1 1 16 1 1 0.0 1 1.0 +18 18.0 1 1 17 1 1 0.0 1 1.0 +18 18.0 1 1 18 1 1 0.0 1 1.0 +18 18.0 1 1 19 1 1 0.0 1 1.0 +18 18.0 1 1 20 1 1 0.0 1 1.0 +PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +18 +PREHOOK: query: 
select 1+sum(c_int) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select 1+sum(c_int) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +19 +PREHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select sum(c_int)+sum(sum(c_int)) over() from t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +36 +PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 3.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 4.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 5.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 6.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 7.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 8.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 9.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 10.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 11.0 1.0 2.0 1.0 1.0 +1 1 2 1.0 12.0 1.0 2.0 1.0 1.0 +1 1 1 1.0 1.0 1.0 1.0 1.0 1.0 +1 1 2 1.0 2.0 1.0 2.0 1.0 1.0 +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value 
range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2 +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL +PREHOOK: query: -- 10. Test views +create view v1 as select c_int, value, c_boolean, dt from t1 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v1 +POSTHOOK: query: -- 10. 
Test views +create view v1 as select c_int, value, c_boolean, dt from t1 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v1 +PREHOOK: query: create view v2 as select c_int, value from t2 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t2 +PREHOOK: Output: database:default +PREHOOK: Output: default@v2 +POSTHOOK: query: create view v2 as select c_int, value from t2 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@t2 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v2 +PREHOOK: query: select value from v1 where c_boolean=false +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select value from v1 where c_boolean=false +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +1 +1 +PREHOOK: query: select max(c_int) from v1 group by (c_boolean) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select max(c_int) from v1 group by (c_boolean) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +NULL +1 +1 +PREHOOK: query: select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select count(v1.c_int) from v1 join t2 on v1.c_int = t2.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +234 +PREHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v2 +#### A masked pattern was here #### +POSTHOOK: query: select count(v1.c_int) from v1 join v2 on v1.c_int = v2.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v2 +#### A masked pattern was here #### +234 +PREHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from v1 a join v1 b on a.value = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +160 +PREHOOK: query: create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t1 +PREHOOK: Input: default@v1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v3 +POSTHOOK: query: create view v3 as select v1.value val from v1 join t1 on v1.c_boolean = t1.c_boolean +POSTHOOK: type: CREATEVIEW +POSTHOOK: 
Input: default@t1 +POSTHOOK: Input: default@v1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v3 +PREHOOK: query: select count(val) from v3 where val != '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v3 +#### A masked pattern was here #### +POSTHOOK: query: select count(val) from v3 where val != '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: Input: default@v3 +#### A masked pattern was here #### +96 +PREHOOK: query: with q1 as ( select key from t1 where key = '1') +select count(*) from q1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select key from t1 where key = '1') +select count(*) from q1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +12 +PREHOOK: query: with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select value from v1 where c_boolean = false) +select count(value) from q1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +2 +PREHOOK: query: create view v4 as +with q1 as ( select key,c_int from t1 where key = '1') +select * from q1 +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@v4 +POSTHOOK: query: create view v4 as +with q1 as ( select key,c_int from t1 where key = '1') +select * from q1 +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v4 +PREHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select c_int from q2 where c_boolean = false), +q2 as ( select c_int,c_boolean from v1 where value = '1') +select sum(c_int) from (select c_int from q1) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +#### A masked pattern was here #### +2 +PREHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@v1 +PREHOOK: Input: default@v4 +#### A masked pattern was here #### +POSTHOOK: query: with q1 as ( select t1.c_int c_int from q2 join t1 where q2.c_int = t1.c_int and t1.dt='2014'), +q2 as ( select c_int,c_boolean from v1 where value = '1' or dt = '14') +select count(*) from q1 join q2 join v4 on q1.c_int = q2.c_int and v4.c_int = q2.c_int +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@v1 +POSTHOOK: 
Input: default@v4 +#### A masked pattern was here #### +31104 +PREHOOK: query: drop view v1 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v1 +PREHOOK: Output: default@v1 +POSTHOOK: query: drop view v1 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v1 +POSTHOOK: Output: default@v1 +PREHOOK: query: drop view v2 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v2 +PREHOOK: Output: default@v2 +POSTHOOK: query: drop view v2 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v2 +POSTHOOK: Output: default@v2 +PREHOOK: query: drop view v3 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v3 +PREHOOK: Output: default@v3 +POSTHOOK: query: drop view v3 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v3 +POSTHOOK: Output: default@v3 +PREHOOK: query: drop view v4 +PREHOOK: type: DROPVIEW +PREHOOK: Input: default@v4 +PREHOOK: Output: default@v4 +POSTHOOK: query: drop view v4 +POSTHOOK: type: DROPVIEW +POSTHOOK: Input: default@v4 +POSTHOOK: Output: default@v4 +PREHOOK: query: -- 11. Union All +select * from t1 union all select * from t2 order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 11. Union All +select * from t1 union all select * from t2 order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 false 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 + 1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 false 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +1 1 1 1.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +2 2 2 2.0 true 2014 +null null NULL NULL NULL 2014 +null null NULL NULL NULL 2014 +PREHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select key from (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r2 where key >=0 order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +3 +3 +3 +PREHOOK: query: select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all 
select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select r2.key from (select key, c_int from (select key, c_int from t1 union all select key, c_int from t3 )r1 union all select key, c_int from t3)r2 join (select key, c_int from (select * from t1 union all select * from t2 where t2.key >=0)r1 union all select key, c_int from t3)r3 on r2.key=r3.key where r3.key >=0 order by r2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +2 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +3 +PREHOOK: query: -- 12. 
SemiJoin +select t1.c_int from t1 left semi join t2 on t1.key=t2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 12. SemiJoin +select t1.c_int from t1 left semi join t2 on t1.key=t2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +NULL +NULL +PREHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: select t1.c_int from t1 left semi join t2 on t1.key=t2.key where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +1 +PREHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c, b, a from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c >= 0)) R where (b + 1 = 2) and (R.b > 0 or c >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +1.0 1 1 +PREHOOK: query: select * from (select t3.c_int, t1.c, b from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select t3.c_int, t1.c, b from 
(select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 = 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p left outer join t3 on t1.a=key where (b + t3.c_int == 2) and (b > 0 or c_int >= 0)) R where (R.c_int + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +1 1.0 1 +PREHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p right outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select c_int, b, t1.c from 
(select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select c_int, b, t1.c from (select key as a, c_int as b, t1.c_float as c from t1 where (t1.c_int + 1 == 2) and (t1.c_int > 0 or t1.c_float >= 0)) t1 left semi join (select t2.key as p, t2.c_int as q, c_float as r from t2 where (t2.c_int + 1 == 2) and (t2.c_int > 0 or t2.c_float >= 0)) t2 on t1.a=p full outer join t3 on t1.a=key where (b + 1 == 2) and (b > 0 or c_int >= 0)) R where (c + 1 = 2) and (R.b > 0 or c_int >= 0) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +1 1 1.0 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, 
p) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 2 + 1 2 +1 2 +1 12 +PREHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: select * from (select key as a, c_int+1 as b, sum(c_int) as c from t1 where (t1.c_int + 1 >= 0) and (t1.c_int > 0 or t1.c_float >= 0) group by c_float, t1.c_int, key having t1.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by a+b desc, c asc limit 5) t1 left semi join (select key as p, c_int+1 as q, sum(c_int) as r from t2 where (t2.c_int + 1 >= 0) and (t2.c_int > 0 or t2.c_float >= 0) group by c_float, t2.c_int, key having t2.c_float > 0 and (c_int >=1 or c_float >= 1) and (c_int + c_float) >= 0 order by q+r/10 desc, p limit 5) t2 on t1.a=p left semi join t3 on t1.a=key where (b + 1 >= 0) and (b > 0 or a >= 0) group by a, c having a > 0 and (a >=1 or c >= 1) and (a + c) >= 0 order by c, a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### + 1 2 + 1 2 +1 2 +1 12 +PREHOOK: query: -- 13. null expr in select list +select null from t3 +PREHOOK: type: QUERY +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: -- 13. null expr in select list +select null from t3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +NULL +PREHOOK: query: -- 14. unary operator +select key from t1 where c_int = -6 or c_int = +6 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 14. unary operator +select key from t1 where c_int = -6 or c_int = +6 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +#### A masked pattern was here #### +PREHOOK: query: -- 15. 
query referencing only partition columns +select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t1@dt=2014 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 15. query referencing only partition columns +select count(t1.dt) from t1 join t2 on t1.dt = t2.dt where t1.dt = '2014' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t1@dt=2014 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +400 +PREHOOK: query: -- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 16. SubQueries Not In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key not in + ( select key from src_cbo s1 + where s1.key > '2' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +2 val_2 +199 val_199 +199 val_199 +199 val_199 +197 val_197 +197 val_197 +196 val_196 +195 val_195 +195 val_195 +194 val_194 +193 val_193 +193 val_193 +193 val_193 +192 val_192 +191 val_191 +191 val_191 +190 val_190 +19 val_19 +189 val_189 +187 val_187 +187 val_187 +187 val_187 +186 val_186 +183 val_183 +181 val_181 +180 val_180 +18 val_18 +18 val_18 +179 val_179 +179 val_179 +178 val_178 +177 val_177 +176 val_176 +176 val_176 +175 val_175 +175 val_175 +174 val_174 +174 val_174 +172 val_172 +172 val_172 +170 val_170 +17 val_17 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +168 val_168 +167 val_167 +167 val_167 +167 val_167 +166 val_166 +165 val_165 +165 val_165 +164 val_164 +164 val_164 +163 val_163 +162 val_162 +160 val_160 +158 val_158 +157 val_157 +156 val_156 +155 val_155 +153 val_153 +152 val_152 +152 val_152 +150 val_150 +15 val_15 +15 val_15 +149 val_149 +149 val_149 +146 val_146 +146 val_146 +145 val_145 +143 val_143 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +137 val_137 +137 val_137 +136 val_136 +134 val_134 +134 val_134 +133 val_133 +131 val_131 +129 val_129 +129 val_129 +128 val_128 +128 val_128 +128 val_128 +126 val_126 +125 val_125 +125 val_125 +120 val_120 +120 val_120 +12 val_12 +12 val_12 +119 val_119 +119 val_119 +119 val_119 +118 val_118 +118 val_118 +116 val_116 +114 val_114 +113 val_113 +113 val_113 +111 val_111 +11 val_11 +105 val_105 +104 val_104 +104 val_104 +103 val_103 +103 val_103 +100 val_100 +100 val_100 +10 val_10 +0 val_0 +0 val_0 +0 val_0 +PREHOOK: query: -- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr +select p_mfgr, b.p_name, p_size +from part b +where b.p_name not in + (select p_name + from (select p_mfgr, p_name, p_size as r from part) a + where r < 10 and b.p_mfgr = a.p_mfgr + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#1 almond antique chartreuse lavender yellow 34 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#4 almond 
antique gainsboro frosted violet 10 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, non corr +select p_name, p_size +from +part where part.p_size not in + (select avg(p_size) + from (select p_size from part) a + where p_size < 10 + ) order by p_name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +almond antique blue firebrick mint 31 +almond antique burnished rose metallic 2 +almond antique burnished rose metallic 2 +almond antique chartreuse khaki white 17 +almond antique chartreuse lavender yellow 34 +almond antique forest lavender goldenrod 14 +almond antique gainsboro frosted violet 10 +almond antique medium spring khaki 6 +almond antique metallic orange dim 19 +almond antique misty red olive 1 +almond antique olive coral navajo 45 +almond antique salmon chartreuse burlywood 6 +almond antique sky peru orange 2 +almond antique violet chocolate turquoise 14 +almond antique violet mint lemon 39 +almond antique violet turquoise frosted 40 +almond aquamarine burnished black steel 28 +almond aquamarine dodger light gainsboro 46 +almond aquamarine floral ivory bisque 27 +almond aquamarine midnight light salmon 2 +almond aquamarine pink moccasin thistle 42 +almond aquamarine rose maroon antique 25 +almond aquamarine sandy cyan gainsboro 18 +almond aquamarine yellow dodger mint 7 +almond azure aquamarine papaya violet 12 +almond azure blanched chiffon midnight 23 +PREHOOK: query: -- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, corr +select p_mfgr, p_name, p_size +from part b where b.p_size not in + (select min(p_size) + from (select p_mfgr, p_size from part) a + where p_size < 10 and b.p_mfgr = a.p_mfgr + ) order by p_name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#5 almond antique blue firebrick mint 31 +Manufacturer#3 almond antique chartreuse khaki white 17 +Manufacturer#1 almond antique chartreuse lavender yellow 34 +Manufacturer#3 almond antique forest lavender goldenrod 14 +Manufacturer#4 almond antique gainsboro frosted violet 10 +Manufacturer#5 almond antique medium spring khaki 6 +Manufacturer#3 almond antique metallic orange dim 19 +Manufacturer#3 almond antique olive coral navajo 45 +Manufacturer#1 almond antique salmon 
chartreuse burlywood 6 +Manufacturer#2 almond antique violet chocolate turquoise 14 +Manufacturer#4 almond antique violet mint lemon 39 +Manufacturer#2 almond antique violet turquoise frosted 40 +Manufacturer#1 almond aquamarine burnished black steel 28 +Manufacturer#5 almond aquamarine dodger light gainsboro 46 +Manufacturer#4 almond aquamarine floral ivory bisque 27 +Manufacturer#1 almond aquamarine pink moccasin thistle 42 +Manufacturer#2 almond aquamarine rose maroon antique 25 +Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 +Manufacturer#4 almond azure aquamarine papaya violet 12 +Manufacturer#5 almond azure blanched chiffon midnight 23 +PREHOOK: query: -- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, Group By in Parent Query +select li.l_partkey, count(*) +from lineitem li +where li.l_linenumber = 1 and + li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') +group by li.l_partkey +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +450 1 +7068 1 +21636 1 +22630 1 +59694 1 +61931 1 +85951 1 +88035 1 +88362 1 +106170 1 +119477 1 +119767 1 +123076 1 +139636 1 +175839 1 +182052 1 +PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) + order by b.p_mfgr +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. + +-- non agg, corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from (select p_mfgr, min(p_retailprice) l, max(p_retailprice) r, avg(p_retailprice) a from part group by p_mfgr) a + where min(p_retailprice) = l and r - l > 600 + ) + order by b.p_mfgr +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 +Manufacturer#2 1690.68 +PREHOOK: query: -- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) + order by b.p_mfgr +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- agg, non corr, having +select b.p_mfgr, min(p_retailprice) +from part b +group by b.p_mfgr +having b.p_mfgr not in + (select p_mfgr + from part a + group by p_mfgr + having max(p_retailprice) - min(p_retailprice) > 600 + ) + order by b.p_mfgr +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 1173.15 +Manufacturer#2 1690.68 +PREHOOK: query: -- 17. 
SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 17. SubQueries In +-- non agg, non corr +select * +from src_cbo +where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- agg, corr +-- add back once rank issue fixed for cbo + +-- distinct, corr +select * +from src_cbo b +where b.key in + (select distinct a.key + from src_cbo a + where b.value = a.value and a.key > '9' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +PREHOOK: type: QUERY +PREHOOK: Input: default@lineitem +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, corr, with join in Parent Query +select p.p_partkey, li.l_suppkey +from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey +where li.l_linenumber = 1 and + li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@lineitem +#### A masked pattern was here #### +4297 1798 +108570 8571 +PREHOOK: query: -- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- where and having +-- Plan is: +-- Stage 1: b semijoin sq1:src_cbo (subquery in where) +-- Stage 2: group by Stage 1 o/p +-- Stage 5: group by on sq2:src_cbo (subquery in having) +-- Stage 6: Stage 2 o/p semijoin Stage 5 +select key, value, count(*) +from src_cbo b +where b.key in (select key from src_cbo where src_cbo.key > '8') +group by key, value +having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by s1.key ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +80 val_80 1 +96 val_96 1 +92 val_92 1 +9 val_9 1 +87 val_87 1 +86 val_86 1 +85 val_85 1 +82 val_82 1 +84 val_84 2 +95 val_95 2 +83 val_83 2 +98 val_98 2 +97 val_97 2 +90 val_90 3 
+PREHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- non agg, non corr, windowing +select p_mfgr, p_name, avg(p_size) +from part +group by p_mfgr, p_name +having p_name in + (select first_value(p_name) over(partition by p_mfgr order by p_size) from part) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +Manufacturer#1 almond antique burnished rose metallic 2.0 +Manufacturer#3 almond antique misty red olive 1.0 +Manufacturer#5 almond antique sky peru orange 2.0 +Manufacturer#2 almond aquamarine midnight light salmon 2.0 +Manufacturer#4 almond aquamarine yellow dodger mint 7.0 +PREHOOK: query: -- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- 18. SubQueries Not Exists +-- distinct, corr +select * +from src_cbo b +where not exists + (select distinct a.key + from src_cbo a + where b.value = a.value and a.value > 'val_2' + ) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +103 val_103 +103 val_103 +104 val_104 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +119 val_119 +119 val_119 +119 val_119 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +125 val_125 +125 val_125 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +129 val_129 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +134 val_134 +136 val_136 +137 val_137 +137 val_137 +138 val_138 +138 val_138 +138 val_138 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +146 val_146 +149 val_149 +149 val_149 +15 val_15 +15 val_15 +150 val_150 +152 val_152 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +164 val_164 +165 val_165 +165 val_165 +166 val_166 +167 val_167 +167 val_167 +167 val_167 +168 val_168 +169 val_169 +169 val_169 +169 val_169 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +172 val_172 +174 val_174 +174 val_174 +175 val_175 +175 val_175 +176 val_176 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +179 val_179 +18 val_18 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 val_186 +187 val_187 +187 val_187 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +191 val_191 +192 val_192 +193 val_193 +193 val_193 +193 val_193 +194 val_194 +195 val_195 +195 val_195 +196 val_196 +197 val_197 +197 val_197 +199 val_199 +199 val_199 +199 val_199 +2 val_2 +PREHOOK: query: -- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- no agg, corr, having +select * +from src_cbo b +group by key, value +having not exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_12' + ) +POSTHOOK: type: QUERY 
+POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +0 val_0 +10 val_10 +100 val_100 +103 val_103 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +119 val_119 +12 val_12 +PREHOOK: query: -- 19. SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +PREHOOK: type: CREATEVIEW +PREHOOK: Input: default@src_cbo +PREHOOK: Output: database:default +PREHOOK: Output: default@cv1 +POSTHOOK: query: -- 19. SubQueries Exists +-- view test +create view cv1 as +select * +from src_cbo b +where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') +POSTHOOK: type: CREATEVIEW +POSTHOOK: Input: default@src_cbo +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cv1 +PREHOOK: query: select * from cv1 +PREHOOK: type: QUERY +PREHOOK: Input: default@cv1 +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: select * from cv1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cv1 +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- sq in from +select * +from (select * + from src_cbo b + where exists + (select a.key + from src_cbo a + where b.value = a.value and a.key = b.key and a.value > 'val_9') + ) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 val_90 +90 val_90 +90 val_90 +92 val_92 +95 val_95 +95 val_95 +96 val_96 +97 val_97 +97 val_97 +98 val_98 +98 val_98 +PREHOOK: query: -- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@src_cbo +#### A masked pattern was here #### +POSTHOOK: query: -- sq in from, having +select * +from (select b.key, count(*) + from src_cbo b + group by b.key + having exists + (select a.key + from src_cbo a + where a.key = b.key and a.value > 'val_9' + ) +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_cbo +#### A masked pattern was here #### +90 3 +92 1 +95 2 +96 1 +97 2 +98 2 +PREHOOK: query: -- 17. get stats with empty partition list +select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +PREHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### +POSTHOOK: query: -- 17. 
get stats with empty partition list +select t1.value from t1 join t2 on t1.key = t2.key where t1.dt = '10' and t1.c_boolean = true +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +POSTHOOK: Input: default@t2@dt=2014 +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/tez/correlationoptimizer1.q.out b/ql/src/test/results/clientpositive/tez/correlationoptimizer1.q.out index 6b63ab9..5631926 100644 --- a/ql/src/test/results/clientpositive/tez/correlationoptimizer1.q.out +++ b/ql/src/test/results/clientpositive/tez/correlationoptimizer1.q.out @@ -61,7 +61,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -201,7 +201,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -352,6 +352,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0 + input vertices: + 0 Map 1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -500,7 +502,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Semi Join 0 to 1 condition expressions: @@ -649,7 +651,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Semi Join 0 to 1 condition expressions: @@ -789,7 +791,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Outer Join0 to 1 condition expressions: @@ -923,7 +925,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Outer Join0 to 1 condition expressions: @@ -1063,7 +1065,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Outer Join0 to 1 condition expressions: @@ -1197,7 +1199,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Outer Join0 to 1 condition expressions: @@ -1334,7 +1336,7 @@ STAGE PLANS: Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Outer Join0 to 1 condition expressions: @@ -1458,7 +1460,7 @@ STAGE PLANS: Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Outer Join0 to 1 condition expressions: @@ -1591,7 +1593,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Right Outer Join0 to 1 
condition expressions: @@ -1725,7 +1727,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Right Outer Join0 to 1 condition expressions: @@ -1865,7 +1867,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Right Outer Join0 to 1 condition expressions: @@ -1999,7 +2001,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Right Outer Join0 to 1 condition expressions: @@ -2141,7 +2143,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Outer Join 0 to 1 condition expressions: @@ -2275,7 +2277,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Outer Join 0 to 1 condition expressions: @@ -2418,7 +2420,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -2559,7 +2561,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -2701,7 +2703,7 @@ STAGE PLANS: Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -2841,7 +2843,7 @@ STAGE PLANS: Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: diff --git a/ql/src/test/results/clientpositive/tez/cross_join.q.out b/ql/src/test/results/clientpositive/tez/cross_join.q.out index e0bb4e3..ad0c759 100644 --- a/ql/src/test/results/clientpositive/tez/cross_join.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_join.q.out @@ -1,4 +1,4 @@ -Warning: Shuffle Join JOIN[4][tables = [src, src2]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[7][tables = [src, src2]] in Stage 'Reducer 2' is a cross product PREHOOK: query: -- current explain select src.key from src join src src2 PREHOOK: type: QUERY @@ -35,7 +35,7 @@ STAGE PLANS: value expressions: key (type: string) Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -61,7 +61,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[4][tables = [src, src2]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[7][tables = [src, src2]] in Stage 'Reducer 2' is a cross product PREHOOK: query: -- ansi cross join explain select src.key from src cross join src src2 PREHOOK: type: QUERY @@ -98,7 +98,7 @@ STAGE PLANS: value expressions: key (type: string) Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition 
expressions: @@ -169,7 +169,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out index ba86137..5286cac 100644 --- a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out @@ -24,7 +24,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: default@B -Warning: Shuffle Join JOIN[4][tables = [a, b]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[7][tables = [a, b]] in Stage 'Reducer 2' is a cross product PREHOOK: query: explain select * from A join B PREHOOK: type: QUERY POSTHOOK: query: explain select * from A join B @@ -60,7 +60,7 @@ STAGE PLANS: value expressions: key (type: string), value (type: string) Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -86,7 +86,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[10][tables = [d1, d2, a]] in Stage 'Reducer 3' is a cross product +Warning: Shuffle Join MERGEJOIN[18][tables = [d1, d2, a]] in Stage 'Reducer 3' is a cross product PREHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A PREHOOK: type: QUERY POSTHOOK: query: explain select * from B d1 join B d2 on d1.key = d2.key join A @@ -142,7 +142,7 @@ STAGE PLANS: value expressions: key (type: string), value (type: string) Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -156,7 +156,7 @@ STAGE PLANS: value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) Reducer 3 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -182,7 +182,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[16][tables = [a, od1]] in Stage 'Reducer 4' is a cross product +Warning: Shuffle Join MERGEJOIN[25][tables = [a, od1]] in Stage 'Reducer 4' is a cross product PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 on d1.key = d2.key @@ -243,7 +243,7 @@ STAGE PLANS: value expressions: key (type: string), value (type: string) Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -282,7 +282,7 @@ STAGE PLANS: value expressions: _col0 (type: string) Reducer 4 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -308,8 +308,8 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[4][tables = [d1, d2]] in Stage 'Reducer 2' is a cross product -Warning: Shuffle Join JOIN[14][tables = [a, od1]] in Stage 'Reducer 4' is a cross product +Warning: Shuffle Join MERGEJOIN[18][tables = [d1, d2]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[19][tables = [a, od1]] in Stage 'Reducer 4' is a cross product PREHOOK: query: explain select * from A join (select d1.key from B d1 join B d2 where 1 = 1 group by d1.key) od1 PREHOOK: type: QUERY POSTHOOK: query: explain select * from A join (select d1.key from B d1 
join B d2 where 1 = 1 group by d1.key) od1 @@ -355,7 +355,7 @@ STAGE PLANS: value expressions: key (type: string), value (type: string) Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -394,7 +394,7 @@ STAGE PLANS: value expressions: _col0 (type: string) Reducer 4 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -420,7 +420,7 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join JOIN[21][tables = [ss, od1]] in Stage 'Reducer 4' is a cross product +Warning: Shuffle Join MERGEJOIN[30][tables = [ss, od1]] in Stage 'Reducer 4' is a cross product PREHOOK: query: explain select * from (select A.key from A group by key) ss join (select d1.key from B d1 join B d2 on d1.key = d2.key where 1 = 1 group by d1.key) od1 @@ -490,7 +490,7 @@ STAGE PLANS: Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -529,7 +529,7 @@ STAGE PLANS: value expressions: _col0 (type: string) Reducer 4 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out index 06def5d..3a2d725 100644 --- a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out @@ -64,6 +64,8 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 1 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) @@ -118,6 +120,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator sort order: @@ -152,6 +156,8 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 + input vertices: + 0 Map 1 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) @@ -213,6 +219,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0 + input vertices: + 1 Map 3 Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -256,6 +264,8 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col5 + input vertices: + 1 Reducer 2 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) @@ -324,6 +334,8 @@ STAGE PLANS: 0 1 outputColumnNames: _col0 + input vertices: + 1 Map 3 Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -362,6 +374,8 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col5 + input vertices: + 1 Reducer 2 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator 
expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) @@ -436,6 +450,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0 + input vertices: + 1 Map 3 Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -519,6 +535,8 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1 + input vertices: + 1 Reducer 2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/delete_all_non_partitioned.q.out b/ql/src/test/results/clientpositive/tez/delete_all_non_partitioned.q.out index 0d428ca..38ce075 100644 --- a/ql/src/test/results/clientpositive/tez/delete_all_non_partitioned.q.out +++ b/ql/src/test/results/clientpositive/tez/delete_all_non_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_danp -POSTHOOK: query: create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_danp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_danp diff --git a/ql/src/test/results/clientpositive/tez/delete_all_partitioned.q.out b/ql/src/test/results/clientpositive/tez/delete_all_partitioned.q.out index 4486323..c5149b2 100644 --- a/ql/src/test/results/clientpositive/tez/delete_all_partitioned.q.out +++ b/ql/src/test/results/clientpositive/tez/delete_all_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dap -POSTHOOK: query: create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_dap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dap @@ -84,3 +84,5 @@ POSTHOOK: Input: default@acid_dap POSTHOOK: Input: default@acid_dap@ds=today POSTHOOK: Input: default@acid_dap@ds=tomorrow #### A masked pattern was here #### +-1071480828 aw724t8c5558x2xneC624 today +-1072076362 2uLyD28144vklju213J1mr today diff --git a/ql/src/test/results/clientpositive/tez/delete_tmp_table.q.out b/ql/src/test/results/clientpositive/tez/delete_tmp_table.q.out index ca568b3..4dc7344 100644 --- a/ql/src/test/results/clientpositive/tez/delete_tmp_table.q.out +++ b/ql/src/test/results/clientpositive/tez/delete_tmp_table.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create temporary 
table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dtt -POSTHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create temporary table acid_dtt(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dtt diff --git a/ql/src/test/results/clientpositive/tez/delete_where_no_match.q.out b/ql/src/test/results/clientpositive/tez/delete_where_no_match.q.out index 1450ee6..cb2adc6 100644 --- a/ql/src/test/results/clientpositive/tez/delete_where_no_match.q.out +++ b/ql/src/test/results/clientpositive/tez/delete_where_no_match.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dwnm -POSTHOOK: query: create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_dwnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dwnm diff --git a/ql/src/test/results/clientpositive/tez/delete_where_non_partitioned.q.out b/ql/src/test/results/clientpositive/tez/delete_where_non_partitioned.q.out index d465e8e..1bdb1e6 100644 --- a/ql/src/test/results/clientpositive/tez/delete_where_non_partitioned.q.out +++ b/ql/src/test/results/clientpositive/tez/delete_where_non_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dwnp -POSTHOOK: query: create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_dwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dwnp diff --git a/ql/src/test/results/clientpositive/tez/delete_where_partitioned.q.out b/ql/src/test/results/clientpositive/tez/delete_where_partitioned.q.out index 9f8581b..fc2e369 100644 --- a/ql/src/test/results/clientpositive/tez/delete_where_partitioned.q.out +++ b/ql/src/test/results/clientpositive/tez/delete_where_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dwp -POSTHOOK: query: create table acid_dwp(a int, b varchar(128)) 
partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_dwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dwp diff --git a/ql/src/test/results/clientpositive/tez/delete_whole_partition.q.out b/ql/src/test/results/clientpositive/tez/delete_whole_partition.q.out index a2408eb..043daf4 100644 --- a/ql/src/test/results/clientpositive/tez/delete_whole_partition.q.out +++ b/ql/src/test/results/clientpositive/tez/delete_whole_partition.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_dwhp -POSTHOOK: query: create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_dwhp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_dwhp diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out index 78aeff0..c5effa0 100644 --- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out @@ -56,35 +56,35 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: ds (type: string) outputColumnNames: ds - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: 
Num rows: 1000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -207,12 +207,12 @@ STAGE PLANS: TableScan alias: srcpart filterExpr: ds is not null (type: boolean) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: ds (type: string) sort order: + Map-reduce partition columns: ds (type: string) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan @@ -244,7 +244,7 @@ STAGE PLANS: Target Vertex: Map 1 Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -327,12 +327,12 @@ STAGE PLANS: TableScan alias: srcpart filterExpr: ds is not null (type: boolean) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: ds (type: string) sort order: + Map-reduce partition columns: ds (type: string) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan @@ -349,7 +349,7 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -449,12 +449,12 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: ds (type: string) sort order: + Map-reduce partition columns: ds (type: string) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE value expressions: hr (type: string) Map 5 Map Operator Tree: @@ -516,7 +516,7 @@ STAGE PLANS: Target Vertex: Map 1 Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -531,7 +531,7 @@ STAGE PLANS: Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -621,12 +621,12 @@ STAGE PLANS: TableScan alias: srcpart filterExpr: (ds is not null and hr is not null) (type: boolean) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: ds (type: string) sort order: + Map-reduce partition columns: ds (type: string) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: 
COMPLETE Column stats: NONE value expressions: hr (type: string) Map 5 Map Operator Tree: @@ -658,7 +658,7 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -673,7 +673,7 @@ STAGE PLANS: Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -772,12 +772,12 @@ STAGE PLANS: Map Operator Tree: TableScan alias: srcpart - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: ds (type: string), hr (type: string) sort order: ++ Map-reduce partition columns: ds (type: string), hr (type: string) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan @@ -824,7 +824,7 @@ STAGE PLANS: Target Vertex: Map 1 Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -907,12 +907,12 @@ STAGE PLANS: TableScan alias: srcpart filterExpr: (ds is not null and hr is not null) (type: boolean) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: ds (type: string), hr (type: string) sort order: ++ Map-reduce partition columns: ds (type: string), hr (type: string) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan @@ -929,7 +929,7 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -1025,12 +1025,12 @@ STAGE PLANS: TableScan alias: srcpart filterExpr: ds is not null (type: boolean) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: ds (type: string) sort order: + Map-reduce partition columns: ds (type: string) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan @@ -1062,7 +1062,7 @@ STAGE PLANS: Target Vertex: Map 1 Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -1145,12 +1145,12 @@ STAGE PLANS: TableScan alias: srcpart filterExpr: ds is not null (type: boolean) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: ds (type: string) sort order: + Map-reduce partition columns: ds (type: string) - Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE 
Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -1167,7 +1167,7 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
@@ -1261,15 +1261,15 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: UDFToDouble(hr) is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: UDFToDouble(hr) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: UDFToDouble(hr) (type: double)
                       sort order: +
                       Map-reduce partition columns: UDFToDouble(hr) (type: double)
-                      Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -1301,15 +1301,15 @@ STAGE PLANS:
                     Target Vertex: Map 1
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
                   0
                   1
-                Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     mode: hash
@@ -1384,15 +1384,15 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: (hr * 2) is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (hr * 2) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: (hr * 2) (type: double)
                       sort order: +
                       Map-reduce partition columns: (hr * 2) (type: double)
-                      Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -1424,15 +1424,15 @@ STAGE PLANS:
                     Target Vertex: Map 1
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
                   0
                   1
-                Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     mode: hash
@@ -1507,15 +1507,15 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: UDFToDouble(hr) is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: UDFToDouble(hr) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: UDFToDouble(hr) (type: double)
                       sort order: +
                       Map-reduce partition columns: UDFToDouble(hr) (type: double)
-                      Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -1532,15 +1532,15 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
                   0
                   1
-                Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     mode: hash
@@ -1615,15 +1615,15 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: (hr * 2) is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (hr * 2) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: (hr * 2) (type: double)
                       sort order: +
                       Map-reduce partition columns: (hr * 2) (type: double)
-                      Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -1640,15 +1640,15 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
                   0
                   1
-                Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     mode: hash
@@ -1736,15 +1736,15 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: UDFToString((hr * 2)) is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: UDFToString((hr * 2)) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: UDFToString((hr * 2)) (type: string)
                       sort order: +
                       Map-reduce partition columns: UDFToString((hr * 2)) (type: string)
-                      Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -1776,15 +1776,15 @@ STAGE PLANS:
                     Target Vertex: Map 1
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
                   0
                   1
-                Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     mode: hash
@@ -1875,25 +1875,25 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: ds (type: string)
                     sort order: +
                    Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 2000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 2000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                       Dynamic Partitioning Event Operator
                         Target Input: srcpart
                         Partition key expr: ds
-                        Statistics: Num rows: 2000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                         Target column: ds
                         Target Vertex: Map 4
         Map 4
@@ -1901,40 +1901,40 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: (ds is not null and (ds = '2008-04-08')) (type: boolean)
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
                   0
                   1
-                Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                  Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order:
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)
         Reducer 3
             Reduce Operator Tree:
@@ -1942,14 +1942,14 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: bigint)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1960,16 +1960,16 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: _col0 (type: string)
                     sort order: +
                    Map-reduce partition columns: _col0 (type: string)
-                    Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

   Stage: Stage-0
     Fetch Operator
@@ -2007,7 +2007,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 1000
-Warning: Shuffle Join JOIN[4][tables = [srcpart, srcpart_date_hour]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[13][tables = [srcpart, srcpart_date_hour]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: -- non-equi join
 EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
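The renamed operator also renames the warning: on Tez the reduce-side join is now emitted as a merge join, so the cross-product check reports MERGEJOIN[13] where it used to report JOIN[4]. The shape that trips it is the disjunctive join predicate from this test: an OR of two equality conditions cannot serve as an equi-join key, so the shuffle join has no key and degenerates to a cross product. The triggering query, restated from the diff:

-- The OR prevents an equi-join key, so EXPLAIN prints the cross-product warning:
EXPLAIN select count(*) from srcpart, srcpart_date_hour
where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11)
  and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);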
@@ -2030,10 +2030,10 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     sort order:
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     value expressions: ds (type: string), hr (type: string)
         Map 4
             Map Operator Tree:
@@ -2050,7 +2050,7 @@ STAGE PLANS:
                     value expressions: ds (type: string), hr (type: string)
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
@@ -2097,7 +2097,7 @@ STAGE PLANS:
       Processor Tree:
         ListSink

-Warning: Shuffle Join JOIN[4][tables = [srcpart, srcpart_date_hour]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[13][tables = [srcpart, srcpart_date_hour]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart
@@ -2139,12 +2139,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: ds (type: string), hr (type: string)
                     sort order: ++
                    Map-reduce partition columns: ds (type: string), hr (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -2191,7 +2191,7 @@ STAGE PLANS:
                     Target Vertex: Map 1
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
@@ -2279,12 +2279,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: ds (type: string)
                     sort order: +
                    Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -2298,7 +2298,7 @@ STAGE PLANS:
                     value expressions: date (type: string)
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Left Outer Join0 to 1
                 condition expressions:
@@ -2365,12 +2365,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: ds (type: string)
                     sort order: +
                    Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -2402,7 +2402,7 @@ STAGE PLANS:
                     Target Vertex: Map 1
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Left Outer Join0 to 1
                 condition expressions:
@@ -2467,12 +2467,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: ds (type: string)
                     sort order: +
                    Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -2486,7 +2486,7 @@ STAGE PLANS:
                     value expressions: date (type: string)
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Outer Join 0 to 1
                 condition expressions:
@@ -2559,12 +2559,12 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: (hr = 11) (type: boolean)
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: ds (type: string)
                     sort order: +
                    Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     value expressions: hr (type: string)
         Map 5
             Map Operator Tree:
@@ -2625,7 +2625,7 @@ STAGE PLANS:
                     Target Vertex: Map 1
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
@@ -2640,7 +2640,7 @@ STAGE PLANS:
                 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
         Reducer 3
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
@@ -2751,7 +2751,7 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
@@ -2764,7 +2764,7 @@ STAGE PLANS:
                   Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
         Reducer 3
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
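The hunks above cover the outer-join variants: the reduce-side Join Operator becomes a Merge Join Operator for Left Outer and full Outer joins too, while the scan-side estimates drop from COMPLETE to NONE column stats. A hedged sketch of the query shape being planned (the srcpart_date table and its ds column are assumed from this test suite, not shown in these hunks):

-- Outer joins keep the merge-join reducer; the preserved side cannot be pruned:
EXPLAIN select count(*) from srcpart left outer join srcpart_date
  on srcpart.ds = srcpart_date.ds;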
@@ -2846,48 +2846,48 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
         Map 6
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: ds (type: string)
                     sort order: +
                    Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 7
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
         Reducer 2
             Reduce Operator Tree:
@@ -2925,23 +2925,23 @@ STAGE PLANS:
                     Target Vertex: Map 6
         Reducer 4
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Left Semi Join 0 to 1
                 condition expressions:
                   0
                   1
-                Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
-                  Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                  Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     aggregations: count()
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       sort order:
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       value expressions: _col0 (type: bigint)
         Reducer 5
             Reduce Operator Tree:
@@ -2949,14 +2949,14 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: bigint)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3043,48 +3043,48 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
         Map 6
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: ds (type: string)
                     sort order: +
                    Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 7
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
         Reducer 2
             Reduce Operator Tree:
@@ -3122,42 +3122,42 @@ STAGE PLANS:
                     Target Vertex: Map 6
         Reducer 4
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Left Semi Join 0 to 1
                 condition expressions:
                   0 {KEY.reducesinkkey0}
                   1
                 outputColumnNames: _col2
-                Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col2 (type: string)
                   outputColumnNames: _col2
-                  Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                   Group By Operator
                     keys: _col2 (type: string)
                     mode: hash
                     outputColumnNames: _col0
-                    Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                      Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
         Reducer 5
             Reduce Operator Tree:
               Group By Operator
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
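The Left Semi Join plans above come from IN-subqueries over min(ds)/max(ds); after the change their output estimate is the conservative 2200-row join cardinality rather than the 2-row column-stats guess. Roughly the query shape (a sketch, not copied from this diff; the actual test unions the min and max branches):

-- IN (subquery) is compiled to a Left Semi Join against the aggregated branch:
EXPLAIN select distinct ds from srcpart
where ds in (select min(ds) from srcpart union all select max(ds) from srcpart);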
@@ -3246,78 +3246,78 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
         Map 10
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
         Map 5
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 8
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Reducer 11
             Reduce Operator Tree:
               Group By Operator
@@ -3412,21 +3412,21 @@ STAGE PLANS:
                     Target Vertex: Map 8
         Reducer 4
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Left Semi Join 0 to 1
                 condition expressions:
                   0 {KEY.reducesinkkey0}
                   1
                 outputColumnNames: _col0
-                Statistics: Num rows: 4000 Data size: 336000 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 4000 Data size: 336000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 4000 Data size: 336000 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -3511,7 +3511,7 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -3521,6 +3521,8 @@ STAGE PLANS:
                     keys:
                       0 ds (type: string)
                       1 ds (type: string)
+                    input vertices:
+                      1 Map 3
                     Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
@@ -3643,7 +3645,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -3654,6 +3656,8 @@ STAGE PLANS:
                       0 ds (type: string)
                       1 ds (type: string)
                     outputColumnNames: _col3
+                    input vertices:
+                      1 Map 4
                     Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -3664,6 +3668,8 @@ STAGE PLANS:
                       keys:
                         0 _col3 (type: string)
                         1 hr (type: string)
+                      input vertices:
+                        1 Map 3
                       Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE
@@ -3815,7 +3821,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -3825,6 +3831,8 @@ STAGE PLANS:
                     keys:
                       0 ds (type: string), hr (type: string)
                       1 ds (type: string), hr (type: string)
+                    input vertices:
+                      1 Map 3
                     Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
@@ -3959,7 +3967,7 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -3969,6 +3977,8 @@ STAGE PLANS:
                     keys:
                       0 ds (type: string)
                       1 ds (type: string)
+                    input vertices:
+                      1 Map 3
                     Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
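From here on, the broadcast-join hunks add an explicit "input vertices" list to each Map Join Operator, recording which vertex (for example "1 Map 3") ships the hash side over a broadcast edge. These plans appear when map-join conversion is enabled; a sketch with standard Hive settings, shown for illustration rather than taken from this diff:

-- hive.auto.convert.join turns the shuffle join into the broadcast map join above:
set hive.execution.engine=tez;
set hive.auto.convert.join=true;
EXPLAIN select count(*) from srcpart join srcpart_date on srcpart.ds = srcpart_date.ds;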
@@ -4064,10 +4074,10 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: UDFToDouble(hr) is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: UDFToDouble(hr) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
                            Inner Join 0 to 1
@@ -4077,9 +4087,11 @@ STAGE PLANS:
                       keys:
                         0 UDFToDouble(hr) (type: double)
                         1 UDFToDouble(UDFToInteger((hr / 2))) (type: double)
-                      Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                        Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -4183,10 +4195,10 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: (hr * 2) is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (hr * 2) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
                            Inner Join 0 to 1
@@ -4196,9 +4208,11 @@ STAGE PLANS:
                       keys:
                         0 (hr * 2) (type: double)
                         1 hr (type: double)
-                      Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                        Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
@@ -4308,8 +4322,9 @@ STAGE PLANS:
   Stage: Stage-1
     Tez
       Edges:
-        Reducer 3 <- Map 1 (BROADCAST_EDGE), Map 2 (SIMPLE_EDGE)
-        Reducer 4 <- Reducer 3 (SIMPLE_EDGE)
+        Map 1 <- Reducer 4 (BROADCAST_EDGE)
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+        Reducer 4 <- Map 3 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1
@@ -4317,58 +4332,7 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
-                  Reduce Output Operator
-                    key expressions: ds (type: string)
-                    sort order: +
-                    Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: ds (type: string)
-                    outputColumnNames: _col0
-                    Statistics: Num rows: 2000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                    Group By Operator
-                      keys: _col0 (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 2000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                      Dynamic Partitioning Event Operator
-                        Target Input: srcpart
-                        Partition key expr: ds
-                        Statistics: Num rows: 2000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
-                        Target column: ds
-                        Target Vertex: Map 2
-        Map 2
-            Map Operator Tree:
-                TableScan
-                  alias: srcpart
-                  filterExpr: (ds is not null and (ds = '2008-04-08')) (type: boolean)
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
-                  Select Operator
-                    expressions: ds (type: string)
-                    outputColumnNames: ds
-                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
-                    Group By Operator
-                      keys: ds (type: string)
-                      mode: hash
-                      outputColumnNames: _col0
-                      Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE
-                      Reduce Output Operator
-                        key expressions: _col0 (type: string)
-                        sort order: +
-                        Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE
-        Reducer 3
-            Reduce Operator Tree:
-              Group By Operator
-                keys: KEY._col0 (type: string)
-                mode: mergepartial
-                outputColumnNames: _col0
-                Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
-                Select Operator
-                  expressions: _col0 (type: string)
-                  outputColumnNames: _col0
-                  Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -4378,36 +4342,89 @@ STAGE PLANS:
                     keys:
                       0 ds (type: string)
                       1 _col0 (type: string)
-                    Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    input vertices:
+                      1 Reducer 4
+                    Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
-                      Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                      Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         aggregations: count()
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                           sort order:
-                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                           value expressions: _col0 (type: bigint)
-        Reducer 4
+        Map 3
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart
+                  filterExpr: (ds is not null and (ds = '2008-04-08')) (type: boolean)
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: ds
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: ds (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2
             Reduce Operator Tree:
               Group By Operator
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: bigint)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                         serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+        Reducer 4
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Map-reduce partition columns: _col0 (type: string)
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: _col0 (type: string)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: _col0 (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                      Dynamic Partitioning Event Operator
+                        Target Input: srcpart
+                        Partition key expr: ds
+                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                        Target column: ds
+                        Target Vertex: Map 1

   Stage: Stage-0
     Fetch Operator
@@ -4467,7 +4484,7 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
                          Left Outer Join0 to 1
@@ -4478,6 +4495,8 @@ STAGE PLANS:
                       0 ds (type: string)
                       1 ds (type: string)
                     outputColumnNames: _col8
+                    input vertices:
+                      1 Map 3
                     Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                     Filter Operator
                       predicate: (_col8 = '2008-04-08') (type: boolean)
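The edge rewrite above is the interesting part: the dimension branch now runs first (Map 3 feeding Reducer 4), and Reducer 4 both broadcasts its distinct keys to Map 1 for the map join and carries the Dynamic Partitioning Event Operator that prunes the srcpart scan at runtime. A hedged sketch of the query shape that plans this way (the setting name is the standard Hive one, not taken from this diff):

-- The subquery branch becomes Reducer 4; its keys broadcast-prune Map 1:
set hive.tez.dynamic.partition.pruning=true;
EXPLAIN select count(*) from srcpart
join (select ds from srcpart where ds = '2008-04-08' group by ds) s
  on srcpart.ds = s.ds;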
@@ -4549,12 +4568,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: ds (type: string)
                     sort order: +
                    Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 2
             Map Operator Tree:
                 TableScan
@@ -4573,6 +4592,8 @@ STAGE PLANS:
                     keys:
                       0 ds (type: string)
                       1 ds (type: string)
+                    input vertices:
+                      1 Map 1
                     Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
@@ -4632,12 +4653,12 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     key expressions: ds (type: string)
                     sort order: +
                    Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
         Map 4
             Map Operator Tree:
                 TableScan
@@ -4651,7 +4672,7 @@ STAGE PLANS:
                     value expressions: date (type: string)
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Outer Join 0 to 1
                 condition expressions:
@@ -4723,7 +4744,7 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart
                   filterExpr: (hr = 11) (type: boolean)
-                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -4734,6 +4755,8 @@ STAGE PLANS:
                       0 ds (type: string)
                       1 ds (type: string)
                     outputColumnNames: _col3
+                    input vertices:
+                      1 Map 4
                     Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -4744,6 +4767,8 @@ STAGE PLANS:
                       keys:
                         0 _col3 (type: string)
                         1 '11' (type: string)
+                      input vertices:
+                        1 Map 3
                       Statistics: Num rows: 1210 Data size: 12854 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         Statistics: Num rows: 1210 Data size: 12854 Basic stats: COMPLETE Column stats: NONE
@@ -4960,26 +4985,26 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: min(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
         Map 4
             Map Operator Tree:
                 TableScan
                   alias: srcpart
                   filterExpr: ds is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
                          Left Semi Join 0 to 1
@@ -4990,38 +5015,40 @@ STAGE PLANS:
                     keys:
                       0 ds (type: string)
                       1 _col0 (type: string)
                     outputColumnNames: _col2
-                    Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
+                    input vertices:
+                      1 Union 3
+                    Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col2 (type: string)
                       outputColumnNames: _col2
-                      Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         keys: _col2 (type: string)
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                           key expressions: _col0 (type: string)
                           sort order: +
                          Map-reduce partition columns: _col0 (type: string)
-                          Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE
+                          Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
         Map 6
             Map Operator Tree:
                 TableScan
                   alias: srcpart
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         value expressions: _col0 (type: string)
         Reducer 2
             Reduce Operator Tree:
@@ -5063,14 +5090,14 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
-                    Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
                         output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -5194,10 +5221,10 @@ STAGE PLANS:
                 TableScan
                   alias: srcpart_orc
                   filterExpr: UDFToDouble(hr) is not null (type: boolean)
-                  Statistics: Num rows: 2000 Data size: 188000 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2000 Data size: 188000 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: UDFToDouble(hr) is not null (type: boolean)
-                    Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 1000 Data size: 94000 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
                            Inner Join 0 to 1
@@ -5207,9 +5234,11 @@ STAGE PLANS:
                       keys:
                         0 ds (type: string), UDFToDouble(hr) (type: double)
                         1 ds (type: string), UDFToDouble(hr) (type: double)
-                      Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                      input vertices:
+                        1 Map 3
+                      Statistics: Num rows: 1100 Data size: 103400 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
-                        Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE
+                        Statistics: Num rows: 1100 Data size: 103400 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count()
                           mode: hash
diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
index 80967ec..ef11095 100644
--- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
+++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning_2.q.out
@@ -67,6 +67,34 @@ POSTHOOK: query: load data local inpath '../../data/files/agg_01-p3.txt' into ta
 POSTHOOK: type: LOAD
 #### A masked pattern was here ####
 POSTHOOK: Output: default@agg_01@dim_shops_id=3
+PREHOOK: query: analyze table dim_shops compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@dim_shops
+PREHOOK: Output: default@dim_shops
+POSTHOOK: query: analyze table dim_shops compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@dim_shops
+POSTHOOK: Output: default@dim_shops
+PREHOOK: query: analyze table agg_01 partition (dim_shops_id) compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@agg_01
+PREHOOK: Input: default@agg_01@dim_shops_id=1
+PREHOOK: Input: default@agg_01@dim_shops_id=2
+PREHOOK: Input: default@agg_01@dim_shops_id=3
+PREHOOK: Output: default@agg_01
+PREHOOK: Output: default@agg_01@dim_shops_id=1
+PREHOOK: Output: default@agg_01@dim_shops_id=2
+PREHOOK: Output: default@agg_01@dim_shops_id=3
+POSTHOOK: query: analyze table agg_01 partition (dim_shops_id) compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@agg_01
+POSTHOOK: Input: default@agg_01@dim_shops_id=1
+POSTHOOK: Input: default@agg_01@dim_shops_id=2
+POSTHOOK: Input: default@agg_01@dim_shops_id=3
+POSTHOOK: Output: default@agg_01
+POSTHOOK: Output: default@agg_01@dim_shops_id=1
+POSTHOOK: Output: default@agg_01@dim_shops_id=2
+POSTHOOK: Output: default@agg_01@dim_shops_id=3
 PREHOOK: query: select * from dim_shops
 PREHOOK: type: QUERY
 PREHOOK: Input: default@dim_shops
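The second golden file changes for a different reason: the test now computes basic table and partition statistics up front, so the plans below show real row counts (3 rows for dim_shops, 9 for agg_01) instead of the old 0-row PARTIAL estimates. The added statements, restated from the hunk above:

analyze table dim_shops compute statistics;
analyze table agg_01 partition (dim_shops_id) compute statistics;
-- column-level stats would need the separate "for columns" variant, which this test does not run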
@@ -137,29 +165,29 @@ STAGE PLANS:
                 TableScan
                   alias: d1
                   filterExpr: (id is not null and (label) IN ('foo', 'bar')) (type: boolean)
-                  Statistics: Num rows: 0 Data size: 18 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE
                   Filter Operator
                     predicate: (id is not null and (label) IN ('foo', 'bar')) (type: boolean)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                    Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: id (type: int)
                       sort order: +
                      Map-reduce partition columns: id (type: int)
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
                       value expressions: label (type: string)
                     Select Operator
                       expressions: id (type: int)
                       outputColumnNames: _col0
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
                       Group By Operator
                         keys: _col0 (type: int)
                         mode: hash
                         outputColumnNames: _col0
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
                         Dynamic Partitioning Event Operator
                           Target Input: agg
                           Partition key expr: dim_shops_id
-                          Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                          Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE
                           Target column: dim_shops_id
                           Target Vertex: Map 2
         Map 2
@@ -167,7 +195,7 @@ STAGE PLANS:
                 TableScan
                   alias: agg
                   filterExpr: dim_shops_id is not null (type: boolean)
-                  Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE
                   Map Join Operator
                     condition map:
                          Inner Join 0 to 1
@@ -178,25 +206,554 @@ STAGE PLANS:
                       0 dim_shops_id (type: int)
                       1 id (type: int)
                     outputColumnNames: _col0, _col1, _col5, _col6
-                    Statistics: Num rows: 0 Data size: 39 Basic stats: PARTIAL Column stats: NONE
+                    input vertices:
+                      1 Map 1
+                    Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE
                     Filter Operator
                       predicate: ((_col1 = _col5) and (_col6) IN ('foo', 'bar')) (type: boolean)
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                      Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col6 (type: string), _col0 (type: decimal(10,0))
                         outputColumnNames: _col6, _col0
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                        Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
                         Group By Operator
                           aggregations: count(), sum(_col0)
                           keys: _col6 (type: string)
                           mode: hash
                           outputColumnNames: _col0, _col1, _col2
+                          Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                          Reduce Output Operator
+                            key expressions: _col0 (type: string)
+                            sort order: +
+                            Map-reduce partition columns: _col0 (type: string)
+                            Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE
+                            value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
+        Reducer 3
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0), sum(VALUE._col1)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: decimal(20,0))
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string)
+                    sort order: +
+                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0))
+        Reducer 4
+            Reduce Operator Tree:
+              Select Operator
+                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0))
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+PREHOOK: type: QUERY
+PREHOOK: Input: default@agg_01
+PREHOOK: Input: default@agg_01@dim_shops_id=1
+PREHOOK: Input: default@agg_01@dim_shops_id=2
+PREHOOK: Input: default@agg_01@dim_shops_id=3
+PREHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT d1.label, count(*), sum(agg.amount)
+FROM agg_01 agg,
+dim_shops d1
+WHERE agg.dim_shops_id = d1.id
+and
+d1.label in ('foo', 'bar')
+GROUP BY d1.label
+ORDER BY d1.label
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@agg_01
+POSTHOOK: Input: default@agg_01@dim_shops_id=1
+POSTHOOK: Input: default@agg_01@dim_shops_id=2
+POSTHOOK: Input: default@agg_01@dim_shops_id=3
+POSTHOOK: Input: default@dim_shops
+#### A masked pattern was here ####
+bar 3 15
+foo 3 6
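The expected rows (bar 3 15, foo 3 6) confirm the semantics are unchanged: each matching label groups three agg_01 rows, and with the Dynamic Partitioning Event Operator in place the dim_shops_id=3 partition should be skipped at runtime. The query, restated for reference:

SELECT d1.label, count(*), sum(agg.amount)
FROM agg_01 agg, dim_shops d1
WHERE agg.dim_shops_id = d1.id AND d1.label in ('foo', 'bar')
GROUP BY d1.label
ORDER BY d1.label;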
size: 6 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(), sum(_col0) + keys: _col6 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0)) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), sum(VALUE._col1) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: decimal(20,0)) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0)) + Reducer 4 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0)) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@agg_01@dim_shops_id=2 +PREHOOK: Input: default@agg_01@dim_shops_id=3 +PREHOOK: Input: default@dim_shops +#### A masked pattern was here #### +POSTHOOK: query: SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@agg_01@dim_shops_id=2 +POSTHOOK: Input: default@agg_01@dim_shops_id=3 +POSTHOOK: Input: default@dim_shops +#### A masked pattern was here #### +bar 3 15 +foo 3 6 +PREHOOK: query: EXPLAIN SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: d1 + filterExpr: id is not null (type: boolean) + Statistics: Num rows: 3 Data size: 15 Basic 
stats: COMPLETE Column stats: NONE + Filter Operator + predicate: id is not null (type: boolean) + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: id (type: int) + sort order: + + Map-reduce partition columns: id (type: int) + Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: NONE + value expressions: label (type: string) + Map 2 + Map Operator Tree: + TableScan + alias: agg + filterExpr: dim_shops_id is not null (type: boolean) + Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {dim_shops_id} + 1 {id} {label} + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + outputColumnNames: _col1, _col5, _col6 + input vertices: + 1 Map 1 + Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col1 = _col5) (type: boolean) + Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col6 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 4 Data size: 12 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@agg_01@dim_shops_id=2 +PREHOOK: Input: default@agg_01@dim_shops_id=3 +PREHOOK: Input: default@dim_shops +#### A masked pattern was here #### +POSTHOOK: query: SELECT d1.label +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@agg_01@dim_shops_id=2 +POSTHOOK: Input: default@agg_01@dim_shops_id=3 +POSTHOOK: Input: default@dim_shops +#### A masked pattern was here #### +foo +foo +foo +bar +bar +bar +baz +baz +baz +PREHOOK: query: EXPLAIN SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: d1 + filterExpr: (id is not null and (id = 1)) (type: boolean) + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (id is not null and (id = 1)) (type: boolean) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: 1 (type: int) + sort order: + + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: agg + filterExpr: (dim_shops_id is not null 
and (dim_shops_id = 1)) (type: boolean) + Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {amount} {dim_shops_id} + 1 + keys: + 0 dim_shops_id (type: int) + 1 1 (type: int) + outputColumnNames: _col0, _col1 + input vertices: + 1 Map 1 + Statistics: Num rows: 3 Data size: 9 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((_col1 = 1) and (_col1 = 1)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(10,0)) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@agg_01 +PREHOOK: Input: default@agg_01@dim_shops_id=1 +PREHOOK: Input: default@dim_shops +#### A masked pattern was here #### +POSTHOOK: query: SELECT agg.amount +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and agg.dim_shops_id = 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@agg_01 +POSTHOOK: Input: default@agg_01@dim_shops_id=1 +POSTHOOK: Input: default@dim_shops +#### A masked pattern was here #### +1 +2 +3 +PREHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT d1.label, count(*), sum(agg.amount) +FROM agg_01 agg, +dim_shops d1 +WHERE agg.dim_shops_id = d1.id +and +d1.label in ('foo', 'bar') +GROUP BY d1.label +ORDER BY d1.label +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: d1 + filterExpr: (id is not null and (label) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (id is not null and (label) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: id (type: int) + sort order: + + Map-reduce partition columns: id (type: int) + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + value expressions: label (type: string) + Select Operator + expressions: id (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: int) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: agg + Partition key expr: dim_shops_id + Statistics: 
Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE + Target column: dim_shops_id + Target Vertex: Map 2 + Map 2 + Map Operator Tree: + TableScan + alias: agg + filterExpr: dim_shops_id is not null (type: boolean) + Statistics: Num rows: 9 Data size: 27 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {amount} {dim_shops_id} + 1 {id} {label} + keys: + 0 dim_shops_id (type: int) + 1 id (type: int) + outputColumnNames: _col0, _col1, _col5, _col6 + input vertices: + 1 Map 1 + Statistics: Num rows: 9 Data size: 29 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((_col1 = _col5) and (_col6) IN ('foo', 'bar')) (type: boolean) + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col6 (type: string), _col0 (type: decimal(10,0)) + outputColumnNames: _col6, _col0 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(), sum(_col0) + keys: _col6 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0)) Reducer 3 Reduce Operator Tree: @@ -205,25 +762,25 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: decimal(20,0)) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: bigint), _col2 (type: decimal(20,0)) Reducer 4 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: decimal(20,0)) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -294,28 +851,28 @@ STAGE PLANS: TableScan alias: dim_shops filterExpr: (id is not null and (label = 'bar')) (type: boolean) - Statistics: Num rows: 2 Data size: 18 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (id is not null and (label = 'bar')) (type: boolean) - 
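The plans in this file exercise Tez dynamic partition pruning: the dimension-side scan of dim_shops feeds a Dynamic Partitioning Event Operator whose Target Vertex is the fact-table map (Target column: dim_shops_id), so only the agg_01 partitions whose keys survive the dimension filter are read at runtime. A minimal way to reproduce the pattern, reusing the agg_01/dim_shops tables from this test and assuming hive.tez.dynamic.partition.pruning is the switch that gates the new operator:

    set hive.execution.engine=tez;
    set hive.tez.dynamic.partition.pruning=true;  -- assumed flag; enables the pruning event
    -- agg_01 is partitioned by dim_shops_id; the label filter narrows
    -- dim_shops to the matching ids, and only those partitions are scanned.
    EXPLAIN
    SELECT d1.label, count(*), sum(agg.amount)
    FROM agg_01 agg, dim_shops d1
    WHERE agg.dim_shops_id = d1.id
      AND d1.label IN ('foo', 'bar')
    GROUP BY d1.label
    ORDER BY d1.label;

In the resulting plan, Map 2 keeps its full partition list in the PREHOOK inputs, but at execution time the broadcast edge from Map 1 prunes the partitions before the fact scan runs.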
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: id (type: int) sort order: + Map-reduce partition columns: id (type: int) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: id (type: int) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Dynamic Partitioning Event Operator Target Input: agg_01 Partition key expr: dim_shops_id - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Target column: dim_shops_id Target Vertex: Map 3 Map 2 @@ -323,28 +880,28 @@ STAGE PLANS: TableScan alias: dim_shops filterExpr: (id is not null and (label = 'foo')) (type: boolean) - Statistics: Num rows: 2 Data size: 18 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 15 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (id is not null and (label = 'foo')) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: id (type: int) sort order: + Map-reduce partition columns: id (type: int) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: id (type: int) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Dynamic Partitioning Event Operator Target Input: agg_01 Partition key expr: dim_shops_id - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 1 Data size: 5 Basic stats: COMPLETE Column stats: NONE Target column: dim_shops_id Target Vertex: Map 5 Map 3 @@ -362,6 +919,8 @@ STAGE PLANS: 0 dim_shops_id (type: int) 1 id (type: int) outputColumnNames: _col0, _col1, _col5 + input vertices: + 1 Map 1 Filter Operator predicate: (_col1 = _col5) (type: boolean) Select Operator @@ -391,6 +950,8 @@ STAGE PLANS: 0 dim_shops_id (type: int) 1 id (type: int) outputColumnNames: _col0, _col1, _col5 + input vertices: + 1 Map 2 Filter Operator predicate: (_col1 = _col5) (type: boolean) Select Operator diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out index f0bf08d..71e3eb5 100644 --- a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out @@ -214,6 +214,7 
@@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.over1k_part_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -300,6 +301,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.over1k_part_limit_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -350,11 +352,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string) sort order: ++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Execution mode: vectorized Reducer 2 Reduce Operator Tree: @@ -368,6 +370,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.over1k_part_buck_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -417,11 +420,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Execution mode: vectorized Reducer 2 Reduce Operator Tree: @@ -435,6 +438,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.over1k_part_buck_sort_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -585,6 +589,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.over1k_part_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -671,6 +676,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.over1k_part_limit_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -721,11 +727,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string) sort order: ++ Map-reduce partition columns: _col4 (type: 
tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Execution mode: vectorized Reducer 2 Reduce Operator Tree: @@ -739,6 +745,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.over1k_part_buck_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -788,11 +795,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Execution mode: vectorized Reducer 2 Reduce Operator Tree: @@ -806,6 +813,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.over1k_part_buck_sort_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -922,8 +930,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 16 - rawDataSize 320 + numRows 32 + rawDataSize 640 totalSize 1348 #### A masked pattern was here #### @@ -966,9 +974,9 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 3 - rawDataSize 60 - totalSize 1034 + numRows 6 + rawDataSize 120 + totalSize 1050 #### A masked pattern was here #### # Storage Information @@ -1010,8 +1018,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 7 - rawDataSize 140 + numRows 14 + rawDataSize 280 totalSize 1166 #### A masked pattern was here #### @@ -1054,9 +1062,9 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 3 - rawDataSize 60 - totalSize 1040 + numRows 6 + rawDataSize 120 + totalSize 1050 #### A masked pattern was here #### # Storage Information @@ -1097,9 +1105,9 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 16 - rawDataSize 320 - totalSize 4332 + numRows 32 + rawDataSize 640 + totalSize 4340 #### A masked pattern was here #### # Storage Information @@ -1140,8 +1148,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 3 - rawDataSize 60 + numRows 6 + rawDataSize 120 totalSize 2094 #### A masked pattern was here #### @@ -1183,9 +1191,9 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 16 - rawDataSize 320 - totalSize 4318 + numRows 32 + rawDataSize 640 + totalSize 4326 #### A masked pattern was here #### # Storage Information @@ -1226,8 +1234,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE 
true numFiles 8 - numRows 3 - rawDataSize 60 + numRows 6 + rawDataSize 120 totalSize 2094 #### A masked pattern was here #### @@ -1362,6 +1370,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.over1k_part2_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -1443,6 +1452,270 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.over1k_part2_orc + Execution mode: vectorized + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27 +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from (select * from over1k_orc order by i limit 10) tmp where t is null or t=27 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: over1k_orc + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: int) + sort order: + + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col0 is null or (_col0 = 27)) (type: boolean) + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col4 (type: tinyint) + sort order: + + Map-reduce partition columns: _col4 (type: tinyint) + Statistics: 
Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Extract + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 2960 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + Execution mode: vectorized + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: over1k_orc + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (t is null or (t = 27)) (type: boolean) + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + outputColumnNames: si, i, b, f, t + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + sort order: +++++ + Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), 
_col4 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + Execution mode: vectorized + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator +explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t +PREHOOK: type: QUERY +POSTHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator +explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: over1k_orc + Statistics: Num rows: 1049 Data size: 311170 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (t is null or (t = 27)) (type: boolean) + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + outputColumnNames: si, i, b, f, t + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + sort order: +++++ + Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: 
float), _col4 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col4 (type: tinyint) + sort order: + + Map-reduce partition columns: _col4 (type: tinyint) + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Extract + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 524 Data size: 155436 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -1837,6 +2110,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.over1k_part_buck_sort2_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -1886,11 +2160,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 1048 Data size: 310873 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Execution mode: vectorized Reducer 2 Reduce Operator Tree: @@ -1904,6 +2178,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.over1k_part_buck_sort2_orc + Execution mode: vectorized Stage: Stage-2 Dependency Collection @@ -2095,17 +2370,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over1k_part_buck_sort2_orc - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -2113,14 +2388,14 @@ STAGE PLANS: 
aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -2318,17 +2593,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: over1k_part_buck_sort2_orc - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 19 Data size: 493 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reducer 2 Reduce Operator Tree: @@ -2336,14 +2611,14 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out index e2f8673..a69b814 100644 --- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out +++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization.q.out @@ -291,11 +291,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string) sort order: ++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), 
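Two changes recur throughout these dynpart_sort hunks: in both dynpart_sort_opt_vectorization.q.out and dynpart_sort_optimization.q.out the placeholder bucket key -1 (type: int) becomes a '_bucket_number' (type: string) entry in the reduce key and value expressions, and in the ORC-backed dynpart_sort_opt_vectorization.q.out the reducers that write the sorted dynamic partitions additionally gain Execution mode: vectorized. A sketch of a statement that drives this path, assuming the over1k_orc and over1k_part_buck_sort_orc tables created earlier in the test and that these two settings are the relevant switches:

    set hive.optimize.sort.dynamic.partition=true;      -- sorted dynamic-partition insert path
    set hive.vectorized.execution.reduce.enabled=true;  -- assumed switch for vectorized reducers
    -- The target table is bucketed and sorted, so the plan carries '_bucket_number'
    -- through the final Reduce Sink to route each row to the right bucket file.
    EXPLAIN
    INSERT OVERWRITE TABLE over1k_part_buck_sort_orc PARTITION (t)
    SELECT si, i, b, f, t FROM over1k_orc WHERE t IS NULL OR t = 27;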
_col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Reducer 2 Reduce Operator Tree: Extract @@ -357,11 +357,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Reducer 2 Reduce Operator Tree: Extract @@ -644,11 +644,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string) sort order: ++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Reducer 2 Reduce Operator Tree: Extract @@ -710,11 +710,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Reducer 2 Reduce Operator Tree: Extract @@ -843,8 +843,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 16 - rawDataSize 415 + numRows 32 + rawDataSize 830 totalSize 862 #### A masked pattern was here #### @@ -887,8 +887,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 3 - rawDataSize 78 + numRows 6 + rawDataSize 156 totalSize 162 #### A masked pattern was here #### @@ -931,8 +931,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 7 - rawDataSize 181 + numRows 14 + rawDataSize 362 totalSize 376 #### A masked pattern was here #### @@ -975,8 +975,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 2 - numRows 3 - rawDataSize 78 + numRows 6 + rawDataSize 156 
totalSize 162 #### A masked pattern was here #### @@ -1018,8 +1018,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 16 - rawDataSize 415 + numRows 32 + rawDataSize 830 totalSize 862 #### A masked pattern was here #### @@ -1061,8 +1061,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 3 - rawDataSize 78 + numRows 6 + rawDataSize 156 totalSize 162 #### A masked pattern was here #### @@ -1104,8 +1104,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 16 - rawDataSize 415 + numRows 32 + rawDataSize 830 totalSize 862 #### A masked pattern was here #### @@ -1147,8 +1147,8 @@ Protect Mode: None Partition Parameters: COLUMN_STATS_ACCURATE true numFiles 8 - numRows 3 - rawDataSize 78 + numRows 6 + rawDataSize 156 totalSize 162 #### A masked pattern was here #### @@ -1381,6 +1381,261 @@ STAGE PLANS: Stage: Stage-3 Stats-Aggr Operator +PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27 +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from (select * from over1k order by i limit 10) tmp where t is null or t=27 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: over1k + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: t (type: tinyint), si (type: smallint), i (type: int), b (type: bigint), f (type: float) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: int) + sort order: + + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: tinyint), _col1 (type: smallint), _col3 (type: bigint), _col4 (type: float) + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: tinyint), VALUE._col1 (type: smallint), KEY.reducesinkkey0 (type: int), VALUE._col2 (type: bigint), VALUE._col3 (type: float) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col0 is null or (_col0 = 27)) (type: boolean) + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: smallint), _col2 (type: int), _col3 (type: bigint), _col4 (type: float), _col0 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col4 (type: tinyint) + sort order: + + Map-reduce partition columns: _col4 (type: tinyint) + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: 
smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Reducer 3 + Reduce Operator Tree: + Extract + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 240 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: over1k + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (t is null or (t = 27)) (type: boolean) + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + outputColumnNames: si, i, b, f, t + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + sort order: +++++ + Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: 
Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t +PREHOOK: type: QUERY +POSTHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator +explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: over1k + Statistics: Num rows: 4443 Data size: 106636 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (t is null or (t = 27)) (type: boolean) + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + outputColumnNames: si, i, b, f, t + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: si (type: smallint), i (type: int), b (type: bigint), f (type: float), t (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + sort order: +++++ + Map-reduce partition columns: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: smallint), KEY._col1 (type: int), KEY._col2 (type: bigint), KEY._col3 (type: float), KEY._col4 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col4 (type: tinyint) + sort order: + + Map-reduce partition columns: _col4 (type: 
tinyint) + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + Reducer 3 + Reduce Operator Tree: + Extract + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2221 Data size: 53305 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ds foo + t + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.over1k_part2 + + Stage: Stage-3 + Stats-Aggr Operator + PREHOOK: query: insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 order by i PREHOOK: type: QUERY PREHOOK: Input: default@over1k @@ -1803,11 +2058,11 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col4 (type: tinyint), -1 (type: int), _col3 (type: float) + key expressions: _col4 (type: tinyint), '_bucket_number' (type: string), _col3 (type: float) sort order: +++ Map-reduce partition columns: _col4 (type: tinyint) Statistics: Num rows: 4442 Data size: 106611 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint) + value expressions: _col0 (type: smallint), _col1 (type: int), _col2 (type: bigint), _col3 (type: float), _col4 (type: tinyint), '_bucket_number' (type: string) Reducer 2 Reduce Operator Tree: Extract diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out new file mode 100644 index 0000000..52bef6f --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out @@ -0,0 +1,1866 @@ +PREHOOK: query: drop table ss +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table ss +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table ss_orc +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table ss_orc +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table ss_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table ss_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table ss_part_orc +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table ss_part_orc +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table ss ( +ss_sold_date_sk int, +ss_net_paid_inc_tax float, +ss_net_profit float) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ss +POSTHOOK: query: create table ss ( +ss_sold_date_sk int, +ss_net_paid_inc_tax float, +ss_net_profit float) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ss +PREHOOK: query: create table ss_part ( +ss_net_paid_inc_tax float, +ss_net_profit float) +partitioned by (ss_sold_date_sk int) +PREHOOK: type: CREATETABLE +PREHOOK: 
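The HIVE-8162 cases above pin down exactly what the test comment asks for: after the mergepartial GROUP BY, the last Reduce Output Operator keys only on the dynamic partition column t (key expressions: _col4), while si, i, b and f travel in the value expressions instead of being re-sorted. The statement under test, copied from the golden output (the set line is an assumption about what the .q file enables):

    set hive.optimize.sort.dynamic.partition=true;  -- assumed; the optimization under test
    -- Only partition column t should key the final Reduce Sink feeding the Move task.
    EXPLAIN
    INSERT OVERWRITE TABLE over1k_part2 PARTITION (ds="foo", t)
    SELECT si, i, b, f, t
    FROM over1k
    WHERE t IS NULL OR t = 27
    GROUP BY si, i, b, f, t;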
Output: database:default +PREHOOK: Output: default@ss_part +POSTHOOK: query: create table ss_part ( +ss_net_paid_inc_tax float, +ss_net_profit float) +partitioned by (ss_sold_date_sk int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ss_part +PREHOOK: query: load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@ss +POSTHOOK: query: load data local inpath '../../data/files/dynpart_test.txt' overwrite into table ss +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@ss +PREHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: ss + Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + outputColumnNames: ss_sold_date_sk, ss_net_paid_inc_tax, ss_net_profit + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: float), _col2 (type: float) + sort order: +++ + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 (type: float) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: float), _col2 (type: float), _col0 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss_part +POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 151 + totalSize 162 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +2.1 
-2026.3 2452617 +2.99 -11.32 2452617 +85.8 25.61 2452617 +552.96 -1363.84 2452617 +565.92 196.48 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +7412.83 2071.68 2452617 +10022.63 3952.8 2452617 +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 186 + totalSize 199 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +0.15 -241.22 2452638 +150.39 -162.12 2452638 +156.67 -4626.56 2452638 +181.03 -207.24 2452638 +267.01 -3266.36 2452638 +317.87 -3775.38 2452638 +1327.08 57.97 2452638 +1413.19 178.08 2452638 +1524.33 494.37 2452638 +1971.35 -488.25 2452638 +4133.98 -775.72 2452638 +4329.49 -4000.51 2452638 +10171.1 660.48 2452638 +PREHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: ss + Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_net_paid_inc_tax (type: float), ss_net_profit (type: float), ss_sold_date_sk (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reduce Output 
Operator + key expressions: _col2 (type: int) + sort order: + + Map-reduce partition columns: _col2 (type: int) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: int) + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: float), VALUE._col1 (type: float), VALUE._col2 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss_part +POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 151 + totalSize 162 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: 
org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +10022.63 3952.8 2452617 +2.99 -11.32 2452617 +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +565.92 196.48 2452617 +85.8 25.61 2452617 +7412.83 2071.68 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +552.96 -1363.84 2452617 +2.1 -2026.3 2452617 +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 186 + totalSize 199 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +4329.49 -4000.51 2452638 +1413.19 178.08 2452638 +150.39 -162.12 2452638 +1524.33 494.37 2452638 +0.15 -241.22 2452638 +267.01 -3266.36 2452638 +181.03 -207.24 2452638 +1971.35 -488.25 2452638 +1327.08 57.97 2452638 +156.67 -4626.56 2452638 +317.87 -3775.38 2452638 +10171.1 660.48 2452638 +4133.98 -775.72 2452638 +PREHOOK: query: -- SORT DYNAMIC PARTITION DISABLED + +explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: -- SORT DYNAMIC PARTITION DISABLED + +explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: 
type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: ss + Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + outputColumnNames: ss_sold_date_sk, ss_net_paid_inc_tax, ss_net_profit + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: float), _col2 (type: float) + sort order: +++ + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 (type: float) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: float), _col2 (type: float), _col0 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss_part +POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638 +POSTHOOK: 
Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 151 + totalSize 162 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +2.1 -2026.3 2452617 +2.99 -11.32 2452617 +85.8 25.61 2452617 +552.96 -1363.84 2452617 +565.92 196.48 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +7412.83 2071.68 2452617 +10022.63 3952.8 2452617 +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 186 + totalSize 199 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 
+PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +0.15 -241.22 2452638 +150.39 -162.12 2452638 +156.67 -4626.56 2452638 +181.03 -207.24 2452638 +267.01 -3266.36 2452638 +317.87 -3775.38 2452638 +1327.08 57.97 2452638 +1413.19 178.08 2452638 +1524.33 494.37 2452638 +1971.35 -488.25 2452638 +4133.98 -775.72 2452638 +4329.49 -4000.51 2452638 +10171.1 660.48 2452638 +PREHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: ss + Statistics: Num rows: 46 Data size: 553 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_net_paid_inc_tax (type: float), ss_net_profit (type: float), ss_sold_date_sk (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col2 (type: int) + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: int) + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: float), VALUE._col1 (type: float), VALUE._col2 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 5 Data size: 60 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.ss_part + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + 
ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss_part +POSTHOOK: query: insert overwrite table ss_part partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 151 + totalSize 162 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +565.92 196.48 2452617 +85.8 25.61 2452617 +7412.83 2071.68 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +552.96 -1363.84 2452617 +2.1 -2026.3 2452617 +10022.63 3952.8 2452617 +2.99 -11.32 2452617 +PREHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part +POSTHOOK: query: desc formatted ss_part partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information 
+Partition Value: [2452638] +Database: default +Table: ss_part +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 186 + totalSize 199 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part +PREHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part +POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +4329.49 -4000.51 2452638 +1413.19 178.08 2452638 +150.39 -162.12 2452638 +1524.33 494.37 2452638 +0.15 -241.22 2452638 +267.01 -3266.36 2452638 +181.03 -207.24 2452638 +1971.35 -488.25 2452638 +1327.08 57.97 2452638 +156.67 -4626.56 2452638 +317.87 -3775.38 2452638 +10171.1 660.48 2452638 +4133.98 -775.72 2452638 +PREHOOK: query: -- VECTORIZATION IS ENABLED + +create table ss_orc ( +ss_sold_date_sk int, +ss_net_paid_inc_tax float, +ss_net_profit float) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ss_orc +POSTHOOK: query: -- VECTORIZATION IS ENABLED + +create table ss_orc ( +ss_sold_date_sk int, +ss_net_paid_inc_tax float, +ss_net_profit float) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ss_orc +PREHOOK: query: create table ss_part_orc ( +ss_net_paid_inc_tax float, +ss_net_profit float) +partitioned by (ss_sold_date_sk int) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ss_part_orc +POSTHOOK: query: create table ss_part_orc ( +ss_net_paid_inc_tax float, +ss_net_profit float) +partitioned by (ss_sold_date_sk int) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ss_part_orc +PREHOOK: query: insert overwrite table ss_orc select * from ss +PREHOOK: type: QUERY +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss_orc +POSTHOOK: query: insert overwrite table ss_orc select * from ss +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss_orc +POSTHOOK: Lineage: ss_orc.ss_net_paid_inc_tax SIMPLE [(ss)ss.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_orc.ss_net_profit SIMPLE [(ss)ss.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_orc.ss_sold_date_sk SIMPLE [(ss)ss.FieldSchema(name:ss_sold_date_sk, type:int, comment:null), ] +PREHOOK: query: drop table ss +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ss +PREHOOK: Output: default@ss +POSTHOOK: query: drop table ss +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ss +POSTHOOK: Output: default@ss +PREHOOK: query: drop table ss_part +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ss_part +PREHOOK: Output: default@ss_part +POSTHOOK: query: drop table ss_part +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ss_part +POSTHOOK: Output: 
default@ss_part +PREHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: ss_orc + Statistics: Num rows: 24 Data size: 288 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + outputColumnNames: ss_sold_date_sk, ss_net_paid_inc_tax, ss_net_profit + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ss_sold_date_sk (type: int), ss_net_paid_inc_tax (type: float), ss_net_profit (type: float) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: float), _col2 (type: float) + sort order: +++ + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int), KEY._col1 (type: float), KEY._col2 (type: float) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: float), _col2 (type: float), _col0 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.ss_part_orc + Execution mode: vectorized + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.ss_part_orc + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + 
ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_orc +PREHOOK: Output: default@ss_part_orc +POSTHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + group by ss_sold_date_sk, + ss_net_paid_inc_tax, + ss_net_profit + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_orc +POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part_orc +POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part_orc +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part_orc +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 88 + totalSize 417 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part_orc +PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part_orc +POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +2.1 -2026.3 2452617 +2.99 -11.32 2452617 +85.8 25.61 2452617 +552.96 -1363.84 2452617 +565.92 196.48 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +7412.83 2071.68 2452617 +10022.63 3952.8 2452617 +PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part_orc +POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638) +POSTHOOK: type: 
DESCTABLE +POSTHOOK: Input: default@ss_part_orc +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: ss_part_orc +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 104 + totalSize 440 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part_orc +PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part_orc +POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +0.15 -241.22 2452638 +150.39 -162.12 2452638 +156.67 -4626.56 2452638 +181.03 -207.24 2452638 +267.01 -3266.36 2452638 +317.87 -3775.38 2452638 +1327.08 57.97 2452638 +1413.19 178.08 2452638 +1524.33 494.37 2452638 +1971.35 -488.25 2452638 +4133.98 -775.72 2452638 +4329.49 -4000.51 2452638 +10171.1 660.48 2452638 +PREHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: ss_orc + Statistics: Num rows: 24 Data size: 288 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((ss_sold_date_sk >= 2452617) and (ss_sold_date_sk <= 2452638)) (type: boolean) + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ss_net_paid_inc_tax (type: float), ss_net_profit (type: float), ss_sold_date_sk (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Map-reduce partition columns: _col2 (type: int) + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: int) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: float), VALUE._col1 (type: float), VALUE._col2 (type: int) + outputColumnNames: _col0, 
_col1, _col2 + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.ss_part_orc + Execution mode: vectorized + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + ss_sold_date_sk + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.ss_part_orc + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_orc +PREHOOK: Output: default@ss_part_orc +POSTHOOK: query: insert overwrite table ss_part_orc partition (ss_sold_date_sk) +select ss_net_paid_inc_tax, + ss_net_profit, + ss_sold_date_sk + from ss_orc + where ss_sold_date_sk>=2452617 and ss_sold_date_sk<=2452638 + distribute by ss_sold_date_sk +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_orc +POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452617 +POSTHOOK: Output: default@ss_part_orc@ss_sold_date_sk=2452638 +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452617).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_paid_inc_tax SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_paid_inc_tax, type:float, comment:null), ] +POSTHOOK: Lineage: ss_part_orc PARTITION(ss_sold_date_sk=2452638).ss_net_profit SIMPLE [(ss_orc)ss_orc.FieldSchema(name:ss_net_profit, type:float, comment:null), ] +PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part_orc +POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452617) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part_orc +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452617] +Database: default +Table: ss_part_orc +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 11 + rawDataSize 88 + totalSize 417 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617 +PREHOOK: type: QUERY +PREHOOK: Input: 
default@ss_part_orc +PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452617 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part_orc +POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452617 +#### A masked pattern was here #### +3423.95 -3164.07 2452617 +5362.01 -600.28 2452617 +565.92 196.48 2452617 +85.8 25.61 2452617 +7412.83 2071.68 2452617 +879.07 -2185.76 2452617 +1765.07 -4648.8 2452617 +552.96 -1363.84 2452617 +2.1 -2026.3 2452617 +10022.63 3952.8 2452617 +2.99 -11.32 2452617 +PREHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ss_part_orc +POSTHOOK: query: desc formatted ss_part_orc partition(ss_sold_date_sk=2452638) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ss_part_orc +# col_name data_type comment + +ss_net_paid_inc_tax float +ss_net_profit float + +# Partition Information +# col_name data_type comment + +ss_sold_date_sk int + +# Detailed Partition Information +Partition Value: [2452638] +Database: default +Table: ss_part_orc +#### A masked pattern was here #### +Protect Mode: None +#### A masked pattern was here #### +Partition Parameters: + COLUMN_STATS_ACCURATE true + numFiles 1 + numRows 13 + rawDataSize 104 + totalSize 440 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638 +PREHOOK: type: QUERY +PREHOOK: Input: default@ss_part_orc +PREHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +POSTHOOK: query: select * from ss_part_orc where ss_sold_date_sk=2452638 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ss_part_orc +POSTHOOK: Input: default@ss_part_orc@ss_sold_date_sk=2452638 +#### A masked pattern was here #### +4329.49 -4000.51 2452638 +1413.19 178.08 2452638 +150.39 -162.12 2452638 +1524.33 494.37 2452638 +0.15 -241.22 2452638 +267.01 -3266.36 2452638 +181.03 -207.24 2452638 +1971.35 -488.25 2452638 +1327.08 57.97 2452638 +156.67 -4626.56 2452638 +317.87 -3775.38 2452638 +10171.1 660.48 2452638 +4133.98 -775.72 2452638 +PREHOOK: query: drop table ss_orc +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ss_orc +PREHOOK: Output: default@ss_orc +POSTHOOK: query: drop table ss_orc +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ss_orc +POSTHOOK: Output: default@ss_orc +PREHOOK: query: drop table ss_part_orc +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ss_part_orc +PREHOOK: Output: default@ss_part_orc +POSTHOOK: query: drop table ss_part_orc +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ss_part_orc +POSTHOOK: Output: default@ss_part_orc +PREHOOK: query: drop table if exists hive13_dp1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists hive13_dp1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table if not exists hive13_dp1 ( + k1 int, + k2 int +) +PARTITIONED BY(`day` string) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@hive13_dp1 +POSTHOOK: query: create table if not exists hive13_dp1 ( + k1 int, + k2 int +) +PARTITIONED BY(`day` string) +STORED AS 
ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@hive13_dp1 +PREHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(value) + keys: 'day' (type: string), key (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), _col0 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.hive13_dp1 + Execution mode: vectorized + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + day + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.hive13_dp1 + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@hive13_dp1 +POSTHOOK: query: insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@hive13_dp1@day=day +POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), 
] +POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from hive13_dp1 limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@hive13_dp1 +PREHOOK: Input: default@hive13_dp1@day=day +#### A masked pattern was here #### +POSTHOOK: query: select * from hive13_dp1 limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@hive13_dp1 +POSTHOOK: Input: default@hive13_dp1@day=day +#### A masked pattern was here #### +0 3 day +10 1 day +100 2 day +103 2 day +104 2 day +PREHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +PREHOOK: type: QUERY +POSTHOOK: query: explain insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(value) + keys: 'day' (type: string), key (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int), _col0 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.hive13_dp1 + Execution mode: vectorized + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + day + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.hive13_dp1 + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert overwrite table `hive13_dp1` partition(`day`) +select + key k1, + count(value) k2, + "day" `day` +from src +group by "day", key +PREHOOK: type: QUERY 
+PREHOOK: Input: default@src
+PREHOOK: Output: default@hive13_dp1
+POSTHOOK: query: insert overwrite table `hive13_dp1` partition(`day`)
+select
+  key k1,
+  count(value) k2,
+  "day" `day`
+from src
+group by "day", key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+POSTHOOK: Output: default@hive13_dp1@day=day
+POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
+POSTHOOK: Lineage: hive13_dp1 PARTITION(day=day).k2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: select * from hive13_dp1 limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@hive13_dp1
+PREHOOK: Input: default@hive13_dp1@day=day
+#### A masked pattern was here ####
+POSTHOOK: query: select * from hive13_dp1 limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@hive13_dp1
+POSTHOOK: Input: default@hive13_dp1@day=day
+#### A masked pattern was here ####
+0	3	day
+10	1	day
+100	2	day
+103	2	day
+104	2	day
+PREHOOK: query: drop table hive13_dp1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@hive13_dp1
+PREHOOK: Output: default@hive13_dp1
+POSTHOOK: query: drop table hive13_dp1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@hive13_dp1
+POSTHOOK: Output: default@hive13_dp1
diff --git a/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out b/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out
index b236efd..b94ffe2 100644
--- a/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out
+++ b/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out
@@ -345,15 +345,16 @@ STAGE PLANS:
         Truncated Path -> Alias:
           /filter_join_breaktask/ds=2008-04-08 [m]
         Reducer 2
-            Needs Tagging: true
+            Needs Tagging: false
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
                   0 {KEY.reducesinkkey0}
                   1 {VALUE._col0}
                 outputColumnNames: _col0, _col7
+                Position of Big Table: 0
                 Statistics: Num rows: 14 Data size: 119 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col7 (type: string)
@@ -364,15 +365,16 @@ STAGE PLANS:
             value expressions: _col0 (type: int)
             auto parallelism: true
         Reducer 3
-            Needs Tagging: true
+            Needs Tagging: false
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
                   0 {VALUE._col0}
                   1 {KEY.reducesinkkey0}
                 outputColumnNames: _col0, _col13
+                Position of Big Table: 0
                 Statistics: Num rows: 15 Data size: 130 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: int), _col13 (type: string)
diff --git a/ql/src/test/results/clientpositive/tez/insert_acid_dynamic_partition.q.out b/ql/src/test/results/clientpositive/tez/insert_acid_dynamic_partition.q.out
new file mode 100644
index 0000000..07eedf3
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/insert_acid_dynamic_partition.q.out
@@ -0,0 +1,48 @@
+PREHOOK: query: create table acid_dynamic(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_dynamic
+POSTHOOK: query: create table acid_dynamic(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_dynamic
+PREHOOK: query: insert into table acid_dynamic partition (ds) select cint, cast(cstring1 as varchar(128)), cstring2 from alltypesorc where cint is not null and cint < 0 order by cint limit 5
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_dynamic
+POSTHOOK: query: insert into table acid_dynamic partition (ds) select cint, cast(cstring1 as varchar(128)), cstring2 from alltypesorc where cint is not null and cint < 0 order by cint limit 5
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_dynamic@ds=4KWs6gw7lv2WYd66P
+POSTHOOK: Output: default@acid_dynamic@ds=4hA4KQj2vD3fI6gX82220d
+POSTHOOK: Output: default@acid_dynamic@ds=KbaDXiN85adbHRx58v
+POSTHOOK: Output: default@acid_dynamic@ds=P76636jJ6qM17d7DIy
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4KWs6gw7lv2WYd66P).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4KWs6gw7lv2WYd66P).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4hA4KQj2vD3fI6gX82220d).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=4hA4KQj2vD3fI6gX82220d).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=KbaDXiN85adbHRx58v).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=KbaDXiN85adbHRx58v).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=P76636jJ6qM17d7DIy).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_dynamic PARTITION(ds=P76636jJ6qM17d7DIy).b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_dynamic order by a,b
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_dynamic
+PREHOOK: Input: default@acid_dynamic@ds=4KWs6gw7lv2WYd66P
+PREHOOK: Input: default@acid_dynamic@ds=4hA4KQj2vD3fI6gX82220d
+PREHOOK: Input: default@acid_dynamic@ds=KbaDXiN85adbHRx58v
+PREHOOK: Input: default@acid_dynamic@ds=P76636jJ6qM17d7DIy
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_dynamic order by a,b
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_dynamic
+POSTHOOK: Input: default@acid_dynamic@ds=4KWs6gw7lv2WYd66P
+POSTHOOK: Input: default@acid_dynamic@ds=4hA4KQj2vD3fI6gX82220d
+POSTHOOK: Input: default@acid_dynamic@ds=KbaDXiN85adbHRx58v
+POSTHOOK: Input: default@acid_dynamic@ds=P76636jJ6qM17d7DIy
+#### A masked pattern was here ####
+-1073279343	oj1YrV5Wa	P76636jJ6qM17d7DIy
+-1073051226	A34p7oRr2WvUJNf	4hA4KQj2vD3fI6gX82220d
+-1072910839	0iqrc5	KbaDXiN85adbHRx58v
+-1072081801	dPkN74F7	4KWs6gw7lv2WYd66P
+-1072076362	2uLyD28144vklju213J1mr	4KWs6gw7lv2WYd66P
diff --git a/ql/src/test/results/clientpositive/tez/insert_acid_not_bucketed.q.out b/ql/src/test/results/clientpositive/tez/insert_acid_not_bucketed.q.out
new file mode 100644
index 0000000..985ae40
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/insert_acid_not_bucketed.q.out
@@ -0,0 +1,36 @@
+PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_notbucketed
+PREHOOK: query: insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: insert into table acid_notbucketed select cint, cast(cstring1 as varchar(128)) from alltypesorc where cint is not null order by cint limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@acid_notbucketed
+POSTHOOK: Lineage: acid_notbucketed.a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+POSTHOOK: Lineage: acid_notbucketed.b EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ]
+PREHOOK: query: select * from acid_notbucketed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_notbucketed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+-1073279343	oj1YrV5Wa
+-1073051226	A34p7oRr2WvUJNf
+-1072910839	0iqrc5
+-1072081801	dPkN74F7
+-1072076362	2uLyD28144vklju213J1mr
+-1071480828	aw724t8c5558x2xneC624
+-1071363017	Anj0oF
+-1070883071	0ruyd6Y50JpdGRf6HqD
+-1070551679	iUR3Q
+-1069736047	k17Am8uPHWk02cEf1jet
diff --git a/ql/src/test/results/clientpositive/tez/insert_into1.q.out b/ql/src/test/results/clientpositive/tez/insert_into1.q.out
index 945d986..359470b 100644
--- a/ql/src/test/results/clientpositive/tez/insert_into1.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_into1.q.out
@@ -104,6 +104,31 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
 10226524244
+PREHOOK: query: explain
+select count(*) from insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count(*) from insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+100
 PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100
 PREHOOK: type: QUERY
 POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into1 SELECT * FROM src LIMIT 100
@@ -198,11 +223,27 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
 20453048488
-PREHOOK: query: SELECT COUNT(*) FROM insert_into1
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM insert_into1
+POSTHOOK: query: select count(*) from insert_into1
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
@@ -301,6 +342,31 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into1
 #### A masked pattern was here ####
 -826625916
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into1
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(*) from insert_into1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(*) from insert_into1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into1
+#### A masked pattern was here ####
+10
 PREHOOK: query: DROP TABLE insert_into1
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into1
diff --git a/ql/src/test/results/clientpositive/tez/insert_into2.q.out b/ql/src/test/results/clientpositive/tez/insert_into2.q.out
index a24ca97..6bfa257 100644
--- a/ql/src/test/results/clientpositive/tez/insert_into2.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_into2.q.out
@@ -97,6 +97,31 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@insert_into2@ds=1
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+select count (*) from insert_into2 where ds = '1'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select count (*) from insert_into2 where ds = '1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count (*) from insert_into2 where ds = '1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: select count (*) from insert_into2 where ds = '1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+100
 PREHOOK: query: INSERT INTO TABLE insert_into2 PARTITION (ds='1') SELECT * FROM src limit 100
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
@@ -107,15 +132,29 @@ POSTHOOK: Input: default@src
 POSTHOOK: Output: default@insert_into2@ds=1
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
 POSTHOOK: Lineage: insert_into2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
 PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@insert_into2
-PREHOOK: Input: default@insert_into2@ds=1
 #### A masked pattern was here ####
 POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='1'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@insert_into2
-POSTHOOK: Input: default@insert_into2@ds=1
 #### A masked pattern was here ####
 200
 PREHOOK: query: SELECT SUM(HASH(c)) FROM (
@@ -237,6 +276,31 @@ POSTHOOK: Input: default@insert_into2@ds=1
 POSTHOOK: Input: default@insert_into2@ds=2
 #### A masked pattern was here ####
 -36239931656
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+100
 PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert_into2 PARTITION (ds='2')
   SELECT * FROM src LIMIT 50
 PREHOOK: type: QUERY
@@ -341,6 +405,31 @@ POSTHOOK: Input: default@insert_into2@ds=1
 POSTHOOK: Input: default@insert_into2@ds=2
 #### A masked pattern was here ####
 -27100860056
+PREHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT COUNT(*) FROM insert_into2 WHERE ds='2'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@insert_into2
+#### A masked pattern was here ####
+50
 PREHOOK: query: DROP TABLE insert_into2
 PREHOOK: type: DROPTABLE
 PREHOOK: Input: default@insert_into2
diff --git a/ql/src/test/results/clientpositive/tez/insert_orig_table.q.out b/ql/src/test/results/clientpositive/tez/insert_orig_table.q.out
index 97a284b..5eea74d 100644
--- a/ql/src/test/results/clientpositive/tez/insert_orig_table.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_orig_table.q.out
@@ -10,7 +10,7 @@ PREHOOK: query: create table acid_iot(
         ctimestamp1 TIMESTAMP,
         ctimestamp2 TIMESTAMP,
         cboolean1 BOOLEAN,
-        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_iot
@@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_iot(
         ctimestamp1 TIMESTAMP,
         ctimestamp2 TIMESTAMP,
         cboolean1 BOOLEAN,
-        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_iot
diff --git a/ql/src/test/results/clientpositive/tez/insert_update_delete.q.out b/ql/src/test/results/clientpositive/tez/insert_update_delete.q.out
index e9f9984..9a3cf4b 100644
--- a/ql/src/test/results/clientpositive/tez/insert_update_delete.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_update_delete.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_iud
-POSTHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_iud(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_iud
diff --git a/ql/src/test/results/clientpositive/tez/insert_values_acid_not_bucketed.q.out b/ql/src/test/results/clientpositive/tez/insert_values_acid_not_bucketed.q.out
new file mode 100644
index 0000000..4f8ddfa
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/insert_values_acid_not_bucketed.q.out
@@ -0,0 +1,28 @@
+PREHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: create table acid_notbucketed(a int, b varchar(128)) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@acid_notbucketed
+PREHOOK: query: insert into table acid_notbucketed values (1, 'abc'), (2, 'def')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@acid_notbucketed
+POSTHOOK: query: insert into table acid_notbucketed values (1, 'abc'), (2, 'def')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@acid_notbucketed
+POSTHOOK: Lineage: acid_notbucketed.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: acid_notbucketed.b EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: select * from acid_notbucketed
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_notbucketed
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_notbucketed
+#### A masked pattern was here ####
+1	abc
+2	def
diff --git a/ql/src/test/results/clientpositive/tez/insert_values_dynamic_partitioned.q.out b/ql/src/test/results/clientpositive/tez/insert_values_dynamic_partitioned.q.out
index daea059..773feb4 100644
--- a/ql/src/test/results/clientpositive/tez/insert_values_dynamic_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_values_dynamic_partitioned.q.out
@@ -1,12 +1,12 @@
 PREHOOK: query: create table ivdp(i int,
                  de decimal(5,2),
-                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@ivdp
 POSTHOOK: query: create table ivdp(i int,
                  de decimal(5,2),
-                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+                 vc varchar(128)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@ivdp
diff --git a/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out b/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out
index ff041b8..5b1c3cc 100644
--- a/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_values_non_partitioned.q.out
@@ -10,7 +10,7 @@ PREHOOK: query: create table acid_ivnp(ti tinyint,
                  b boolean,
                  s string,
                  vc varchar(128),
-                 ch char(12)) clustered by (i) into 2 buckets stored as orc
+                 ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivnp
@@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_ivnp(ti tinyint,
                  b boolean,
                  s string,
                  vc varchar(128),
-                 ch char(12)) clustered by (i) into 2 buckets stored as orc
+                 ch char(12)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivnp
diff --git a/ql/src/test/results/clientpositive/tez/insert_values_orig_table.q.out b/ql/src/test/results/clientpositive/tez/insert_values_orig_table.q.out
index 69220ec..684cd1b 100644
--- a/ql/src/test/results/clientpositive/tez/insert_values_orig_table.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_values_orig_table.q.out
@@ -10,7 +10,7 @@ PREHOOK: query: create table acid_ivot(
         ctimestamp1 TIMESTAMP,
         ctimestamp2 TIMESTAMP,
         cboolean1 BOOLEAN,
-        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivot
@@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_ivot(
         ctimestamp1 TIMESTAMP,
         ctimestamp2 TIMESTAMP,
         cboolean1 BOOLEAN,
-        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc
+        cboolean2 BOOLEAN) clustered by (cint) into 1 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivot
diff --git a/ql/src/test/results/clientpositive/tez/insert_values_partitioned.q.out b/ql/src/test/results/clientpositive/tez/insert_values_partitioned.q.out
index 9fb89ff..6681992 100644
--- a/ql/src/test/results/clientpositive/tez/insert_values_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_values_partitioned.q.out
@@ -9,7 +9,7 @@ PREHOOK: query: create table acid_ivp(ti tinyint,
                  dt date,
                  s string,
                  vc varchar(128),
-                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivp
@@ -24,7 +24,7 @@ POSTHOOK: query: create table acid_ivp(ti tinyint,
                  dt date,
                  s string,
                  vc varchar(128),
-                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc
+                 ch char(12)) partitioned by (ds string) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivp
diff --git a/ql/src/test/results/clientpositive/tez/insert_values_tmp_table.q.out b/ql/src/test/results/clientpositive/tez/insert_values_tmp_table.q.out
index 95d6372..170b4a7 100644
--- a/ql/src/test/results/clientpositive/tez/insert_values_tmp_table.q.out
+++ b/ql/src/test/results/clientpositive/tez/insert_values_tmp_table.q.out
@@ -1,20 +1,22 @@
-PREHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc
+PREHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_ivtt
-POSTHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc
+POSTHOOK: query: create temporary table acid_ivtt(i int, de decimal(5,2), vc varchar(128)) clustered by (vc) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_ivtt
 PREHOOK: query: insert into table acid_ivtt values (1, 109.23, 'mary had a little lamb'),
-  (429496729, 0.14, 'its fleece was white as snow')
+  (429496729, 0.14, 'its fleece was white as snow'),
+  (-29496729, -0.14, 'negative values test')
 PREHOOK: type: QUERY
 PREHOOK: Input: default@values__tmp__table__1
 PREHOOK: Output: default@acid_ivtt
 POSTHOOK: query: insert into table acid_ivtt values (1, 109.23, 'mary had a little lamb'),
-  (429496729, 0.14, 'its fleece was white as snow')
+  (429496729, 0.14, 'its fleece was white as snow'),
+  (-29496729, -0.14, 'negative values test')
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@values__tmp__table__1
 POSTHOOK: Output: default@acid_ivtt
@@ -29,5 +31,6 @@ POSTHOOK: query: select i, de, vc from acid_ivtt order by i
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@acid_ivtt
 #### A masked pattern was here ####
+-29496729	-0.14	negative values test
 1	109.23	mary had a little lamb
 429496729	0.14	its fleece was white as snow
diff --git a/ql/src/test/results/clientpositive/tez/join0.q.out b/ql/src/test/results/clientpositive/tez/join0.q.out
index 5691ef6..4835781 100644
--- a/ql/src/test/results/clientpositive/tez/join0.q.out
+++ b/ql/src/test/results/clientpositive/tez/join0.q.out
@@ -1,4 +1,4 @@
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: EXPLAIN
 SELECT src1.key as k1, src1.value as v1,
        src2.key as k2, src2.value as v2 FROM
@@ -61,7 +61,7 @@ STAGE PLANS:
                       value expressions: _col0 (type: string), _col1 (type: string)
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
@@ -97,7 +97,7 @@ STAGE PLANS:
     Processor Tree:
       ListSink
 
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: EXPLAIN FORMATTED
 SELECT src1.key as k1, src1.value as v1,
        src2.key as k2, src2.value as v2 FROM
@@ -115,7 +115,7 @@ SELECT src1.key as k1, src1.value as v1,
 SORT BY k1, v1, k2, v2
 POSTHOOK: type: QUERY
 #### A masked pattern was here ####
-Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: SELECT src1.key as k1, src1.value as v1,
        src2.key as k2, src2.value as v2 FROM
 (SELECT * FROM src WHERE src.key < 10) src1
diff --git a/ql/src/test/results/clientpositive/tez/join1.q.out b/ql/src/test/results/clientpositive/tez/join1.q.out
index 986ec3f..3a6c5d9 100644
--- a/ql/src/test/results/clientpositive/tez/join1.q.out
+++ b/ql/src/test/results/clientpositive/tez/join1.q.out
@@ -56,7 +56,7 @@ STAGE PLANS:
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
diff --git a/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out b/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
index 3ef7390..c765ab7 100644
--- a/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
+++ b/ql/src/test/results/clientpositive/tez/limit_pushdown.q.out
@@ -673,9 +673,11 @@ STAGE PLANS:
 PREHOOK: query: select key,value from src order by key limit 0
 PREHOOK: type: QUERY
+PREHOOK: Input: default@src
 #### A masked pattern was here ####
 POSTHOOK: query: select key,value from src order by key limit 0
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 PREHOOK: query: -- 2MR (applied to last RS)
 explain
@@ -887,7 +889,7 @@ STAGE PLANS:
                   value expressions: _col1 (type: bigint)
         Reducer 3
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
diff --git a/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out b/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out
index 8ff926b..dcf011e 100644
--- a/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out
+++ b/ql/src/test/results/clientpositive/tez/load_dyn_part1.q.out
@@ -67,9 +67,6 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-2
     Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
-        Reducer 3 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1
@@ -84,12 +81,14 @@ STAGE PLANS:
                     expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                     outputColumnNames: _col0, _col1, _col2, _col3
                     Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col2 (type: string), _col3 (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.nzhang_part1
                   Filter Operator
                     predicate: (ds > '2008-04-08') (type: boolean)
                     Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
@@ -97,36 +96,14 @@ STAGE PLANS:
                     expressions: key (type: string), value (type: string), hr (type: string)
                     outputColumnNames: _col0, _col1, _col2
                     Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col2 (type: string)
-                      sort order: +
-                      Map-reduce partition columns: _col2 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string)
-        Reducer 2
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part1
-        Reducer 3
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 666 Data size: 7075 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part2
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.nzhang_part2
 
   Stage: Stage-3
     Dependency Collection
diff --git a/ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out b/ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out
index 9745dea..8f95a76 100644
--- a/ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out
+++ b/ql/src/test/results/clientpositive/tez/load_dyn_part3.q.out
@@ -53,8 +53,6 @@ STAGE DEPENDENCIES:
 STAGE PLANS:
   Stage: Stage-1
     Tez
-      Edges:
-        Reducer 2 <- Map 1 (SIMPLE_EDGE)
 #### A masked pattern was here ####
       Vertices:
         Map 1
@@ -66,24 +64,14 @@ STAGE PLANS:
                     expressions: key (type: string), value (type: string), ds (type: string), hr (type: string)
                     outputColumnNames: _col0, _col1, _col2, _col3
                     Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                    Reduce Output Operator
-                      key expressions: _col2 (type: string), _col3 (type: string)
-                      sort order: ++
-                      Map-reduce partition columns: _col2 (type: string), _col3 (type: string)
+                    File Output Operator
+                      compressed: false
                       Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                      value expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string)
-        Reducer 2
-            Reduce Operator Tree:
-              Extract
-                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                      name: default.nzhang_part3
+                      table:
+                          input format: org.apache.hadoop.mapred.TextInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          name: default.nzhang_part3
 
   Stage: Stage-2
     Dependency Collection
diff --git a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
index a7d6742..a2f677d 100644
--- a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
+++ b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out
@@ -120,6 +120,8 @@ STAGE PLANS:
                         0 dec (type: decimal(4,2))
                         1 dec (type: decimal(4,0))
                       outputColumnNames: _col0, _col4
+                      input vertices:
+                        1 Map 1
                       Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE
                       Select Operator
                         expressions: _col0 (type: decimal(4,2)), _col4 (type: decimal(4,0))
diff --git a/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
index ebaa41c..2a3e56c 100644
--- a/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
+++ b/ql/src/test/results/clientpositive/tez/mapjoin_mapjoin.q.out
@@ -83,6 +83,8 @@ STAGE PLANS:
                       0 value (type: string)
                       1 value (type: string)
                     outputColumnNames: _col0
+                    input vertices:
+                      1 Map 3
                     Position of Big Table: 0
                     Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
@@ -96,6 +98,8 @@ STAGE PLANS:
                       0 _col0 (type: string)
                       1 key (type: string)
                     outputColumnNames: _col0
+                    input vertices:
+                      1 Map 2
                     Position of Big Table: 0
                     Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
@@ -489,6 +493,8 @@ STAGE PLANS:
                       0 value (type: string)
                       1 value (type: string)
                     outputColumnNames: _col0
+                    input vertices:
+                      1 Map 3
                     Statistics: Num rows: 182 Data size: 1939 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -500,6 +506,8 @@ STAGE PLANS:
                       0 _col0 (type: string)
                       1 key (type: string)
                     outputColumnNames: _col0
+                    input vertices:
+                      1 Map 2
                     Statistics: Num rows: 200 Data size: 2132 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string)
@@ -581,6 +589,8 @@ STAGE PLANS:
                       0 value (type: string)
                       1 value (type: string)
                     outputColumnNames: _col0, _col2
+                    input vertices:
+                      1 Map 4
                     Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE
                     Map Join Operator
                       condition map:
@@ -592,6 +602,8 @@ STAGE PLANS:
                       0 _col0 (type: string)
                       1 key (type: string)
                     outputColumnNames: _col2
+                    input vertices:
+                      1 Map 3
                     Statistics: Num rows: 605 Data size: 6427 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col2 (type: string)
diff --git a/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out b/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out
index 7942ce7..31c77bc 100644
--- a/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out
+++ b/ql/src/test/results/clientpositive/tez/metadata_only_queries.q.out
@@ -352,9 +352,11 @@ STAGE PLANS:
 PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 9999	9999	1999.8	9999	9999	9999	9999	9999
 PREHOOK: query: explain
@@ -375,9 +377,11 @@ STAGE PLANS:
 PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl
 #### A masked pattern was here ####
 65536	65791	4294967296	4294967551	0.009999999776482582	99.9800033569336	0.01	50.0
 PREHOOK: query: explain
@@ -398,9 +402,11 @@ STAGE PLANS:
 PREHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*), sum(1), sum(0.2), count(1), count(s), count(bo), count(bin), count(si) from stats_tbl_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 9489	9489	1897.8	9489	9489	9489	9489	9489
 PREHOOK: query: explain
@@ -421,9 +427,11 @@ STAGE PLANS:
 PREHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
 PREHOOK: type: QUERY
+PREHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 POSTHOOK: query: select min(i), max(i), min(b), max(b), min(f), max(f), min(d), max(d) from stats_tbl_part
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats_tbl_part
 #### A masked pattern was here ####
 65536	65791	4294967296	4294967551	0.009999999776482582	99.9800033569336	0.01	50.0
 PREHOOK: query: explain
 select count(ts) from stats_tbl_part
diff --git a/ql/src/test/results/clientpositive/tez/metadataonly1.q.out b/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
index fa22920..cd038c8 100644
--- a/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
+++ b/ql/src/test/results/clientpositive/tez/metadataonly1.q.out
@@ -48,17 +48,17 @@ STAGE PLANS:
                 aggregations: max(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -135,20 +135,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Group By Operator
                       aggregations: max(ds)
                       mode: hash
                      outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         tag: -1
                         value expressions: _col0 (type: string)
                         auto parallelism: false
@@ -204,17 +204,17 @@ STAGE PLANS:
                 aggregations: max(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -286,22 +286,22 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Group By Operator
                       aggregations: count(DISTINCT ds)
                       keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         tag: -1
                         auto parallelism: false
             Path -> Alias:
@@ -356,17 +356,17 @@ STAGE PLANS:
                 aggregations: count(DISTINCT KEY._col0:0._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: bigint)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -438,20 +438,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Group By Operator
                       aggregations: count(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                         tag: -1
                         value expressions: _col0 (type: bigint)
                         auto parallelism: false
@@ -507,17 +507,17 @@ STAGE PLANS:
                 aggregations: count(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: bigint)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -627,20 +627,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Group By Operator
                       aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         tag: -1
                         value expressions: _col0 (type: string)
                         auto parallelism: false
@@ -734,13 +734,13 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: a2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   GatherStats: false
                   Reduce Output Operator
                     key expressions: ds (type: string)
                     sort order: +
                     Map-reduce partition columns: ds (type: string)
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     tag: 0
                     auto parallelism: true
             Path -> Alias:
@@ -836,46 +836,47 @@ STAGE PLANS:
                 aggregations: max(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                 Filter Operator
                   isSamplingPred: false
                   predicate: _col0 is not null (type: boolean)
-                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   Select Operator
                     expressions: _col0 (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Reduce Output Operator
                       key expressions: _col0 (type: string)
                       sort order: +
                       Map-reduce partition columns: _col0 (type: string)
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                      tag: 1
                       auto parallelism: true
                   Select Operator
                     expressions: _col0 (type: string)
                     outputColumnNames: _col0
-                    Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                     Group By Operator
                       keys: _col0 (type: string)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Dynamic Partitioning Event Operator
                         Target Input: a2
                         Partition key expr: ds
-                        Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         Target column: ds
                         Target Vertex: Map 5
         Reducer 3
-            Needs Tagging: true
+            Needs Tagging: false
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
                   0
                   1
+                Position of Big Table: 0
                 Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: NONE
@@ -1016,23 +1017,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string), hr (type: string)
                     outputColumnNames: ds, hr
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Group By Operator
                       aggregations: count(DISTINCT hr)
                       keys: ds (type: string), hr (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         tag: -1
                         auto parallelism: true
             Path -> Alias:
@@ -1175,17 +1176,17 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1267,23 +1268,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string), hr (type: string)
                     outputColumnNames: ds, hr
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Group By Operator
                       aggregations: count(hr)
                       keys: ds (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string)
                         sort order: +
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         tag: -1
                         value expressions: _col1 (type: bigint)
                         auto parallelism: true
@@ -1425,17 +1426,17 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1510,20 +1511,20 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string)
                     outputColumnNames: ds
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Group By Operator
                      aggregations: max(ds)
                       mode: hash
                       outputColumnNames: _col0
-                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                      Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                       Reduce Output Operator
                         sort order:
-                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                        Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                         tag: -1
                         value expressions: _col0 (type: string)
                         auto parallelism: false
@@ -1621,17 +1622,17 @@ STAGE PLANS:
                 aggregations: max(VALUE._col0)
                 mode: mergepartial
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string)
                   outputColumnNames: _col0
-                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE
+                    Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
@@ -1762,23 +1763,23 @@ STAGE PLANS:
             Map Operator Tree:
                 TableScan
                   alias: test2
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   GatherStats: false
                   Select Operator
                     expressions: ds (type: string), hr (type: string)
                     outputColumnNames: ds, hr
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                     Group By Operator
                       aggregations: count(DISTINCT hr)
                       keys: ds (type: string), hr (type: string)
                       mode: hash
                       outputColumnNames: _col0, _col1, _col2
-                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                       Reduce Output Operator
                         key expressions: _col0 (type: string), _col1 (type: string)
                         sort order: ++
                         Map-reduce partition columns: _col0 (type: string)
-                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                        Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                         tag: -1
                         auto parallelism: true
             Path -> Alias:
@@ -2007,17 +2008,17 @@ STAGE PLANS:
                 keys: KEY._col0 (type: string)
                 mode: mergepartial
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: bigint)
                   outputColumnNames: _col0, _col1
-                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   File Output Operator
                     compressed: false
                     GlobalTableId: 0
 #### A masked pattern was here ####
                     NumFilesPerFileSink: 1
-                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 #### A masked pattern was here ####
                     table:
                         input format: org.apache.hadoop.mapred.TextInputFormat
diff --git a/ql/src/test/results/clientpositive/tez/mrr.q.out b/ql/src/test/results/clientpositive/tez/mrr.q.out
index 6d0d73f..5718c32 100644
--- a/ql/src/test/results/clientpositive/tez/mrr.q.out
+++ b/ql/src/test/results/clientpositive/tez/mrr.q.out
@@ -439,7 +439,7 @@ STAGE PLANS:
                         Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
         Reducer 2
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                 condition expressions:
@@ -867,6 +867,8 @@ STAGE PLANS:
                       0 key (type: string)
                       1 key (type: string)
                     outputColumnNames: _col5, _col6
+                    input vertices:
+                      1 Map 1
                     Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col5 (type: string), _col6 (type: string)
@@ -1424,7 +1426,7 @@ STAGE PLANS:
                   value expressions: _col1 (type: bigint)
         Reducer 4
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Inner Join 0 to 1
                      Inner Join 0 to 2
@@ -1706,6 +1708,8 @@ STAGE PLANS:
                       0 _col0 (type: string)
                       1 key (type: string)
                     outputColumnNames: _col0, _col1, _col2, _col3
+                    input vertices:
+                      0 Reducer 3
                     Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                     Select Operator
                       expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string), _col3 (type: string)
diff --git a/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out b/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
index 71e229e..2af6134 100644
--- a/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/tez/optimize_nullscan.q.out
@@ -55,6 +55,99 @@ POSTHOOK: type: QUERY
 POSTHOOK: Input: default@src
 #### A masked pattern was here ####
 PREHOOK: query: explain extended
+select count(key) from srcpart where 1=2 group by key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain extended
+select count(key) from srcpart where 1=2 group by key
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+
+TOK_QUERY
+   TOK_FROM
+      TOK_TABREF
+         TOK_TABNAME
+            srcpart
+   TOK_INSERT
+      TOK_DESTINATION
+         TOK_DIR
+            TOK_TMP_FILE
+      TOK_SELECT
+         TOK_SELEXPR
+            TOK_FUNCTION
+               count
+               TOK_TABLE_OR_COL
+                  key
+      TOK_WHERE
+         =
+            1
+            2
+      TOK_GROUPBY
+         TOK_TABLE_OR_COL
+            key
+
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+        Reducer 2
+            Needs Tagging: false
+            Reduce Operator Tree:
+              Group By Operator
+                aggregations: count(VALUE._col0)
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                Select Operator
+                  expressions: _col1 (type: bigint)
+                  outputColumnNames: _col0
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 0
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          columns _col0
+                          columns.types bigint
+                          escape.delim \
+                          hive.serialization.extend.nesting.levels true
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    TotalFiles: 1
+                    GatherStats: false
+                    MultiFileSpray: false
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(key) from srcpart where 1=2 group by key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+POSTHOOK: query: select count(key) from srcpart where 1=2 group by key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+#### A masked pattern was here ####
+PREHOOK: query: explain extended
 select * from (select key from src where false) a left outer join (select key from srcpart limit 0) b on a.key=b.key
 PREHOOK: type: QUERY
 POSTHOOK: query: explain extended
@@ -414,15 +507,16 @@ STAGE PLANS:
             -mr-10005default.srcpart{ds=2008-04-09, hr=11} [srcpart]
             -mr-10006default.srcpart{ds=2008-04-09, hr=12} [srcpart]
         Reducer 2
-            Needs Tagging: true
+            Needs Tagging: false
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Left Outer Join0 to 1
                 condition expressions:
                   0 {KEY.reducesinkkey0}
                   1 {KEY.reducesinkkey0}
                 outputColumnNames: _col0, _col1
+                Position of Big Table: 0
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string)
@@ -939,7 +1033,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 2000
 0
-Warning: Shuffle Join JOIN[11][tables = [a, b]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[15][tables = [a, b]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: explain extended
 select * from (select key from src where false) a left outer join (select value from srcpart limit 0) b
 PREHOOK: type: QUERY
@@ -1290,15 +1384,16 @@ STAGE PLANS:
             -mr-10005default.srcpart{ds=2008-04-09, hr=11} [srcpart]
             -mr-10006default.srcpart{ds=2008-04-09, hr=12} [srcpart]
         Reducer 2
-            Needs Tagging: true
+            Needs Tagging: false
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Left Outer Join0 to 1
                 condition expressions:
                   0 {VALUE._col0}
                   1 {VALUE._col0}
                 outputColumnNames: _col0, _col1
+                Position of Big Table: 0
                 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Select Operator
                   expressions: _col0 (type: string), _col1 (type: string)
@@ -1348,7 +1443,7 @@ STAGE PLANS:
     Processor Tree:
       ListSink
 
-Warning: Shuffle Join JOIN[11][tables = [a, b]] in Stage 'Reducer 2' is a cross product
+Warning: Shuffle Join MERGEJOIN[15][tables = [a, b]] in Stage 'Reducer 2' is a cross product
 PREHOOK: query: select * from (select key from src where false) a left outer join (select value from srcpart limit 0) b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
@@ -1795,15 +1890,16 @@ STAGE PLANS:
         Truncated Path -> Alias:
           /src [src]
         Reducer 2
-            Needs Tagging: true
+            Needs Tagging: false
             Reduce Operator Tree:
-              Join Operator
+              Merge Join Operator
                 condition map:
                      Left Outer Join0 to 1
                 condition expressions:
                   0 {KEY.reducesinkkey0}
                   1
                 outputColumnNames: _col0
+                Position of Big Table: 0
                 Filter Operator
                   isSamplingPred: false
                   predicate: false (type: boolean)
diff --git a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
index 6e22f97..e718b29 100644
--- a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
+++ b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out
@@ -73,9 +73,11 @@ POSTHOOK: Lineage: orc_create_people.start_date SIMPLE [(orc_create_people_stagi
 POSTHOOK: Lineage: orc_create_people.state SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:state, type:string, comment:null), ]
 PREHOOK: query: analyze table orc_create_people compute statistics partialscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 POSTHOOK: query: analyze table orc_create_people compute statistics partialscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 PREHOOK: query: desc formatted orc_create_people
 PREHOOK: type: DESCTABLE
@@ -105,7 +107,7 @@ Table Parameters:
 	numFiles            	1
 	numRows             	100
 	rawDataSize         	52600
-	totalSize           	3123
+	totalSize           	3121
 #### A masked pattern was here ####
 
 # Storage Information
@@ -195,7 +197,7 @@ Table Parameters:
 	numFiles            	1
 	numRows             	100
 	rawDataSize         	52600
-	totalSize           	3123
+	totalSize           	3121
 #### A masked pattern was here ####
 
 # Storage Information
@@ -269,11 +271,13 @@ POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_cre
 POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 PREHOOK: Output: default@orc_create_people@state=Ca
 PREHOOK: Output: default@orc_create_people@state=Or
 POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people@state=Ca
 POSTHOOK: Output: default@orc_create_people@state=Or
@@ -581,11 +585,13 @@ POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).salary SIMPLE [(orc_cre
 POSTHOOK: Lineage: orc_create_people PARTITION(state=Or).start_date SIMPLE [(orc_create_people_staging)orc_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ]
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 PREHOOK: Output: default@orc_create_people@state=Ca
 PREHOOK: Output: default@orc_create_people@state=Or
 POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics partialscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people@state=Ca
 POSTHOOK: Output: default@orc_create_people@state=Or
@@ -618,10 +624,10 @@ Protect Mode: None
 #### A masked pattern was here ####
 Partition Parameters:
 	COLUMN_STATS_ACCURATE	true
-	numFiles            	4
+	numFiles            	1
 	numRows             	50
-	rawDataSize         	21980
-	totalSize           	4963
+	rawDataSize         	21950
+	totalSize           	2024
 #### A masked pattern was here ####
 
 # Storage Information
@@ -663,10 +669,10 @@ Protect Mode: None
 #### A masked pattern was here ####
 Partition Parameters:
 	COLUMN_STATS_ACCURATE	true
-	numFiles            	4
+	numFiles            	1
 	numRows             	50
-	rawDataSize         	22048
-	totalSize           	5051
+	rawDataSize         	22050
+	totalSize           	2043
 #### A masked pattern was here ####
 
 # Storage Information
@@ -771,10 +777,10 @@ Protect Mode: None
 #### A masked pattern was here ####
 Partition Parameters:
 	COLUMN_STATS_ACCURATE	true
-	numFiles            	4
+	numFiles            	1
 	numRows             	50
-	rawDataSize         	21980
-	totalSize           	4963
+	rawDataSize         	21950
+	totalSize           	2024
 #### A masked pattern was here ####
 
 # Storage Information
@@ -816,10 +822,10 @@ Protect Mode: None
 #### A masked pattern was here ####
 Partition Parameters:
 	COLUMN_STATS_ACCURATE	true
-	numFiles            	4
+	numFiles            	1
 	numRows             	50
-	rawDataSize         	22048
-	totalSize           	5051
+	rawDataSize         	22050
+	totalSize           	2043
 #### A masked pattern was here ####
 
 # Storage Information
@@ -942,12 +948,14 @@ POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 PREHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
 PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_create_people
 PREHOOK: Output: default@orc_create_people
 PREHOOK: Output: default@orc_create_people@state=Ca
 PREHOOK: Output: default@orc_create_people@state=OH
 PREHOOK: Output: default@orc_create_people@state=Or
 POSTHOOK: query: analyze table orc_create_people partition(state) compute statistics noscan
 POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people
 POSTHOOK: Output: default@orc_create_people@state=Ca
 POSTHOOK: Output: default@orc_create_people@state=OH
diff --git a/ql/src/test/results/clientpositive/tez/orc_merge1.q.out b/ql/src/test/results/clientpositive/tez/orc_merge1.q.out
index cab357a..c87b187 100644
--- a/ql/src/test/results/clientpositive/tez/orc_merge1.q.out
+++ b/ql/src/test/results/clientpositive/tez/orc_merge1.q.out
@@ -146,7 +146,7 @@ Partition Parameters:
 	numFiles            	6
 	numRows             	242
 	rawDataSize         	22748
-	totalSize           	3046
+	totalSize           	3037
 #### A masked pattern was here ####
 
 # Storage Information
@@ -315,7 +315,7 @@ Partition Parameters:
 	numFiles            	1
 	numRows             	242
 	rawDataSize         	22748
-	totalSize           	1328
+	totalSize           	1325
 #### A masked pattern was here ####
 
 # Storage Information
@@ -476,7 +476,7 @@
Partition Parameters: numFiles 1 numRows 242 rawDataSize 22748 - totalSize 2401 + totalSize 2392 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/orc_merge2.q.out b/ql/src/test/results/clientpositive/tez/orc_merge2.q.out index e08e211..0f1917f 100644 --- a/ql/src/test/results/clientpositive/tez/orc_merge2.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_merge2.q.out @@ -33,8 +33,6 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-1 Tez - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 @@ -46,24 +44,14 @@ STAGE PLANS: expressions: UDFToInteger(key) (type: int), value (type: string), (hash(key) pmod 10) (type: int), (hash(value) pmod 10) (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col2 (type: int), _col3 (type: int) - sort order: ++ - Map-reduce partition columns: _col2 (type: int), _col3 (type: int) + File Output Operator + compressed: false Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int) - Reducer 2 - Reduce Operator Tree: - Extract - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.orcfile_merge2a + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.orcfile_merge2a Stage: Stage-2 Dependency Collection diff --git a/ql/src/test/results/clientpositive/tez/orc_merge5.q.out b/ql/src/test/results/clientpositive/tez/orc_merge5.q.out index 27e3b31..fdd4c77 100644 --- a/ql/src/test/results/clientpositive/tez/orc_merge5.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_merge5.q.out @@ -97,10 +97,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema PREHOOK: query: -- 3 files total analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: -- 3 files total analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -128,7 +130,7 @@ Table Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -265,10 +267,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema PREHOOK: query: -- 1 file after merging analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: -- 1 file after merging analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: 
DESCTABLE @@ -296,7 +300,7 @@ Table Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information @@ -335,9 +339,11 @@ POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(nam POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -365,7 +371,7 @@ Table Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -425,10 +431,12 @@ POSTHOOK: Output: default@orc_merge5b PREHOOK: query: -- 1 file after merging analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: -- 1 file after merging analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -456,7 +464,7 @@ Table Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/orc_merge6.q.out b/ql/src/test/results/clientpositive/tez/orc_merge6.q.out index 575564e..b9057f5 100644 --- a/ql/src/test/results/clientpositive/tez/orc_merge6.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_merge6.q.out @@ -115,19 +115,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_ PREHOOK: query: -- 3 files total analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 POSTHOOK: query: -- 3 files total analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) @@ -162,7 +166,7 @@ Partition Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -207,7 +211,7 @@ Partition Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -375,19 +379,23 @@ POSTHOOK: Lineage: orc_merge5a 
PARTITION(year=2001,hour=24).userid SIMPLE [(orc_ PREHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 POSTHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) @@ -422,7 +430,7 @@ Partition Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information @@ -467,7 +475,7 @@ Partition Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information @@ -534,18 +542,22 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merg POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) @@ -580,7 +592,7 @@ Partition Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -625,7 +637,7 @@ Partition Parameters: numFiles 3 numRows 3 rawDataSize 765 - totalSize 1141 + totalSize 1133 #### A masked pattern was here #### # Storage Information @@ -711,19 +723,23 @@ POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 PREHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 POSTHOOK: 
query: -- 1 file after merging analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2000/hour=24 PREHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2001/hour=24 POSTHOOK: query: analyze table orc_merge5a partition(year="2001",hour=24) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 PREHOOK: query: desc formatted orc_merge5a partition(year="2000",hour=24) @@ -758,7 +774,7 @@ Partition Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information @@ -803,7 +819,7 @@ Partition Parameters: numFiles 1 numRows 3 rawDataSize 765 - totalSize 907 + totalSize 899 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/orc_merge7.q.out b/ql/src/test/results/clientpositive/tez/orc_merge7.q.out index 011f279..454b120 100644 --- a/ql/src/test/results/clientpositive/tez/orc_merge7.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_merge7.q.out @@ -148,19 +148,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc PREHOOK: query: -- 3 files total analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: -- 3 files total analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -194,7 +198,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 255 - totalSize 521 + totalSize 513 #### A masked pattern was here #### # Storage Information @@ -238,7 +242,7 @@ Partition Parameters: numFiles 2 numRows 2 rawDataSize 510 - totalSize 1058 + totalSize 1044 #### A masked pattern was here #### # Storage Information @@ -440,19 +444,23 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc PREHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze 
table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -486,7 +494,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 255 - totalSize 521 + totalSize 513 #### A masked pattern was here #### # Storage Information @@ -530,7 +538,7 @@ Partition Parameters: numFiles 1 numRows 2 rawDataSize 510 - totalSize 852 + totalSize 838 #### A masked pattern was here #### # Storage Information @@ -636,18 +644,22 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_mer POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -681,7 +693,7 @@ Partition Parameters: numFiles 1 numRows 1 rawDataSize 255 - totalSize 521 + totalSize 513 #### A masked pattern was here #### # Storage Information @@ -725,7 +737,7 @@ Partition Parameters: numFiles 2 numRows 2 rawDataSize 510 - totalSize 1058 + totalSize 1044 #### A masked pattern was here #### # Storage Information @@ -813,19 +825,23 @@ POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: -- 1 file after merging analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -859,7 +875,7 @@ Partition Parameters: numFiles 1 numRows 1 
rawDataSize 255 - totalSize 521 + totalSize 513 #### A masked pattern was here #### # Storage Information @@ -903,7 +919,7 @@ Partition Parameters: numFiles 1 numRows 2 rawDataSize 510 - totalSize 852 + totalSize 838 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out index 2bf4634..33b5fcd 100644 --- a/ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out @@ -148,10 +148,12 @@ POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema PREHOOK: query: -- 5 files total analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: -- 5 files total analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -179,7 +181,7 @@ Table Parameters: numFiles 5 numRows 15 rawDataSize 3825 - totalSize 2862 + totalSize 2877 #### A masked pattern was here #### # Storage Information @@ -226,10 +228,12 @@ POSTHOOK: Output: default@orc_merge5b PREHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b POSTHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b PREHOOK: query: desc formatted orc_merge5b PREHOOK: type: DESCTABLE @@ -257,7 +261,7 @@ Table Parameters: numFiles 3 numRows 15 rawDataSize 3825 - totalSize 2325 + totalSize 2340 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/tez/orc_merge_incompat2.q.out index 4d21749..963ac36 100644 --- a/ql/src/test/results/clientpositive/tez/orc_merge_incompat2.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_merge_incompat2.q.out @@ -207,18 +207,22 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_mer POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a 
POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -252,7 +256,7 @@ Partition Parameters: numFiles 4 numRows 4 rawDataSize 1020 - totalSize 2092 + totalSize 2060 #### A masked pattern was here #### # Storage Information @@ -296,7 +300,7 @@ Partition Parameters: numFiles 4 numRows 8 rawDataSize 2040 - totalSize 2204 + totalSize 2188 #### A masked pattern was here #### # Storage Information @@ -392,18 +396,22 @@ POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=80.0 PREHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=0.8 POSTHOOK: query: analyze table orc_merge5a partition(st=0.8) compute statistics noscan POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 PREHOOK: query: desc formatted orc_merge5a partition(st=80.0) @@ -437,7 +445,7 @@ Partition Parameters: numFiles 3 numRows 4 rawDataSize 1020 - totalSize 1851 + totalSize 1819 #### A masked pattern was here #### # Storage Information @@ -481,7 +489,7 @@ Partition Parameters: numFiles 3 numRows 8 rawDataSize 2040 - totalSize 1944 + totalSize 1928 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out b/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out new file mode 100644 index 0000000..738abc4 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/orc_vectorization_ppd.q.out @@ -0,0 +1,288 @@ +PREHOOK: query: -- create table with 1000 rows +create table srcorc(key string, value string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcorc +POSTHOOK: query: -- create table with 1000 rows +create table srcorc(key string, value string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcorc +PREHOOK: query: insert overwrite table srcorc select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@srcorc +POSTHOOK: query: insert overwrite table srcorc select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@srcorc +POSTHOOK: Lineage: srcorc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcorc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert into table srcorc select * from src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@srcorc +POSTHOOK: query: insert into table srcorc select * from src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@srcorc +POSTHOOK: Lineage: srcorc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcorc.value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- load table with each row group having 1000 rows and stripe 1 & 2 having 5000 & 2000 rows respectively +create table if not exists vectororc +(s1 string, +s2 string, +d double, +s3 string) +stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectororc +POSTHOOK: query: -- load table with each row group having 1000 rows and stripe 1 & 2 having 5000 & 2000 rows respectively +create table if not exists vectororc +(s1 string, +s2 string, +d double, +s3 string) +stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectororc +PREHOOK: query: -- insert creates separate orc files +insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc +PREHOOK: type: QUERY +PREHOOK: Input: default@srcorc +PREHOOK: Output: default@vectororc +POSTHOOK: query: -- insert creates separate orc files +insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcorc +POSTHOOK: Output: default@vectororc +POSTHOOK: Lineage: vectororc.d EXPRESSION [] +POSTHOOK: Lineage: vectororc.s1 SIMPLE [] +POSTHOOK: Lineage: vectororc.s2 SIMPLE [] +POSTHOOK: Lineage: vectororc.s3 SIMPLE [] +PREHOOK: query: insert into table vectororc select null, "b", rand(2), "zoo" from srcorc +PREHOOK: type: QUERY +PREHOOK: Input: default@srcorc +PREHOOK: Output: default@vectororc +POSTHOOK: query: insert into table vectororc select null, "b", rand(2), "zoo" from srcorc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcorc +POSTHOOK: Output: default@vectororc +POSTHOOK: Lineage: vectororc.d EXPRESSION [] +POSTHOOK: Lineage: vectororc.s1 EXPRESSION [] +POSTHOOK: Lineage: vectororc.s2 SIMPLE [] +POSTHOOK: Lineage: vectororc.s3 SIMPLE [] +PREHOOK: query: insert into table vectororc select null, "c", rand(3), "zoo" from srcorc +PREHOOK: type: QUERY +PREHOOK: Input: default@srcorc +PREHOOK: Output: default@vectororc +POSTHOOK: query: insert into table vectororc select null, "c", rand(3), "zoo" from srcorc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcorc +POSTHOOK: Output: default@vectororc +POSTHOOK: Lineage: vectororc.d EXPRESSION [] +POSTHOOK: Lineage: vectororc.s1 EXPRESSION [] +POSTHOOK: Lineage: vectororc.s2 SIMPLE [] +POSTHOOK: Lineage: vectororc.s3 SIMPLE [] +PREHOOK: query: insert into table vectororc select "apple", "d", rand(4), "zoo" from srcorc +PREHOOK: type: QUERY +PREHOOK: Input: default@srcorc +PREHOOK: Output: default@vectororc +POSTHOOK: query: insert into table vectororc select "apple", "d", rand(4), "zoo" from srcorc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcorc +POSTHOOK: Output: default@vectororc +POSTHOOK: Lineage: vectororc.d EXPRESSION [] +POSTHOOK: Lineage: vectororc.s1 SIMPLE [] +POSTHOOK: Lineage: vectororc.s2 SIMPLE [] +POSTHOOK: Lineage: vectororc.s3 SIMPLE [] +PREHOOK: query: insert into table vectororc select null, "e", rand(5), "z" from srcorc +PREHOOK: type: QUERY +PREHOOK: Input: default@srcorc +PREHOOK: Output: default@vectororc +POSTHOOK: query: insert into table vectororc select null, "e", rand(5), "z" from srcorc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcorc +POSTHOOK: Output: default@vectororc +POSTHOOK: 
Lineage: vectororc.d EXPRESSION [] +POSTHOOK: Lineage: vectororc.s1 EXPRESSION [] +POSTHOOK: Lineage: vectororc.s2 SIMPLE [] +POSTHOOK: Lineage: vectororc.s3 SIMPLE [] +PREHOOK: query: insert into table vectororc select "apple", "f", rand(6), "z" from srcorc +PREHOOK: type: QUERY +PREHOOK: Input: default@srcorc +PREHOOK: Output: default@vectororc +POSTHOOK: query: insert into table vectororc select "apple", "f", rand(6), "z" from srcorc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcorc +POSTHOOK: Output: default@vectororc +POSTHOOK: Lineage: vectororc.d EXPRESSION [] +POSTHOOK: Lineage: vectororc.s1 SIMPLE [] +POSTHOOK: Lineage: vectororc.s2 SIMPLE [] +POSTHOOK: Lineage: vectororc.s3 SIMPLE [] +PREHOOK: query: insert into table vectororc select null, "g", rand(7), "zoo" from srcorc +PREHOOK: type: QUERY +PREHOOK: Input: default@srcorc +PREHOOK: Output: default@vectororc +POSTHOOK: query: insert into table vectororc select null, "g", rand(7), "zoo" from srcorc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcorc +POSTHOOK: Output: default@vectororc +POSTHOOK: Lineage: vectororc.d EXPRESSION [] +POSTHOOK: Lineage: vectororc.s1 EXPRESSION [] +POSTHOOK: Lineage: vectororc.s2 SIMPLE [] +POSTHOOK: Lineage: vectororc.s3 SIMPLE [] +PREHOOK: query: -- since vectororc table has multiple orc file we will load them into a single file using another table +create table if not exists testorc +(s1 string, +s2 string, +d double, +s3 string) +stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@testorc +POSTHOOK: query: -- since vectororc table has multiple orc file we will load them into a single file using another table +create table if not exists testorc +(s1 string, +s2 string, +d double, +s3 string) +stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="100000", "orc.compress.size"="10000") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@testorc +PREHOOK: query: insert overwrite table testorc select * from vectororc order by s2 +PREHOOK: type: QUERY +PREHOOK: Input: default@vectororc +PREHOOK: Output: default@testorc +POSTHOOK: query: insert overwrite table testorc select * from vectororc order by s2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectororc +POSTHOOK: Output: default@testorc +POSTHOOK: Lineage: testorc.d SIMPLE [(vectororc)vectororc.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: testorc.s1 SIMPLE [(vectororc)vectororc.FieldSchema(name:s1, type:string, comment:null), ] +POSTHOOK: Lineage: testorc.s2 SIMPLE [(vectororc)vectororc.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: testorc.s3 SIMPLE [(vectororc)vectororc.FieldSchema(name:s3, type:string, comment:null), ] +PREHOOK: query: -- row group (1,4) from stripe 1 and row group (1) from stripe 2 +-- PPD ONLY +select count(*),int(sum(d)) from testorc where s1 is not null +PREHOOK: type: QUERY +PREHOOK: Input: default@testorc +#### A masked pattern was here #### +POSTHOOK: query: -- row group (1,4) from stripe 1 and row group (1) from stripe 2 +-- PPD ONLY +select count(*),int(sum(d)) from testorc where s1 is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@testorc +#### A masked pattern was here #### +3000 1505 +PREHOOK: query: -- VECTORIZATION + PPD +select count(*),int(sum(d)) from testorc where s1 is not null +PREHOOK: type: QUERY +PREHOOK: Input: 
default@testorc +#### A masked pattern was here #### +POSTHOOK: query: -- VECTORIZATION + PPD +select count(*),int(sum(d)) from testorc where s1 is not null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@testorc +#### A masked pattern was here #### +3000 1505 +PREHOOK: query: -- row group (2,3,5) from stripe 1 and row group (2) from stripe 2 +-- PPD ONLY +select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") +PREHOOK: type: QUERY +PREHOOK: Input: default@testorc +#### A masked pattern was here #### +POSTHOOK: query: -- row group (2,3,5) from stripe 1 and row group (2) from stripe 2 +-- PPD ONLY +select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@testorc +#### A masked pattern was here #### +4000 2006 +PREHOOK: query: -- VECTORIZATION + PPD +select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") +PREHOOK: type: QUERY +PREHOOK: Input: default@testorc +#### A masked pattern was here #### +POSTHOOK: query: -- VECTORIZATION + PPD +select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@testorc +#### A masked pattern was here #### +4000 2006 +PREHOOK: query: -- last row group of stripe 1 and first row group of stripe 2 +-- PPD ONLY +select count(*),int(sum(d)) from testorc where s3="z" +PREHOOK: type: QUERY +PREHOOK: Input: default@testorc +#### A masked pattern was here #### +POSTHOOK: query: -- last row group of stripe 1 and first row group of stripe 2 +-- PPD ONLY +select count(*),int(sum(d)) from testorc where s3="z" +POSTHOOK: type: QUERY +POSTHOOK: Input: default@testorc +#### A masked pattern was here #### +2000 1011 +PREHOOK: query: -- VECTORIZATION + PPD +select count(*),int(sum(d)) from testorc where s3="z" +PREHOOK: type: QUERY +PREHOOK: Input: default@testorc +#### A masked pattern was here #### +POSTHOOK: query: -- VECTORIZATION + PPD +select count(*),int(sum(d)) from testorc where s3="z" +POSTHOOK: type: QUERY +POSTHOOK: Input: default@testorc +#### A masked pattern was here #### +2000 1011 +PREHOOK: query: -- first row group of stripe 1 and last row group of stripe 2 +-- PPD ONLY +select count(*),int(sum(d)) from testorc where s2="a" or s2="g" +PREHOOK: type: QUERY +PREHOOK: Input: default@testorc +#### A masked pattern was here #### +POSTHOOK: query: -- first row group of stripe 1 and last row group of stripe 2 +-- PPD ONLY +select count(*),int(sum(d)) from testorc where s2="a" or s2="g" +POSTHOOK: type: QUERY +POSTHOOK: Input: default@testorc +#### A masked pattern was here #### +2000 1006 +PREHOOK: query: -- VECTORIZATION + PPD +select count(*),int(sum(d)) from testorc where s2="a" or s2="g" +PREHOOK: type: QUERY +PREHOOK: Input: default@testorc +#### A masked pattern was here #### +POSTHOOK: query: -- VECTORIZATION + PPD +select count(*),int(sum(d)) from testorc where s2="a" or s2="g" +POSTHOOK: type: QUERY +POSTHOOK: Input: default@testorc +#### A masked pattern was here #### +2000 1006 +PREHOOK: query: drop table srcorc +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@srcorc +PREHOOK: Output: default@srcorc +POSTHOOK: query: drop table srcorc +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@srcorc +POSTHOOK: Output: default@srcorc +PREHOOK: query: drop table vectororc +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@vectororc +PREHOOK: Output: default@vectororc +POSTHOOK: query: drop table vectororc +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@vectororc +POSTHOOK: Output: 
default@vectororc +PREHOOK: query: drop table testorc +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@testorc +PREHOOK: Output: default@testorc +POSTHOOK: query: drop table testorc +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@testorc +POSTHOOK: Output: default@testorc diff --git a/ql/src/test/results/clientpositive/tez/select_dummy_source.q.out b/ql/src/test/results/clientpositive/tez/select_dummy_source.q.out index e18487b..6f08083 100644 --- a/ql/src/test/results/clientpositive/tez/select_dummy_source.q.out +++ b/ql/src/test/results/clientpositive/tez/select_dummy_source.q.out @@ -239,37 +239,20 @@ explain select 2 + 3,x from (select 1 + 2 x) X POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: _dummy_table - Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE - Select Operator - expressions: 5 (type: int), 3 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Select Operator + expressions: 5 (type: int), 3 (type: int) + outputColumnNames: _col0, _col1 + ListSink PREHOOK: query: select 2 + 3,x from (select 1 + 2 x) X PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/tez/subquery_exists.q.out b/ql/src/test/results/clientpositive/tez/subquery_exists.q.out index c3d902e..c79b718 100644 --- a/ql/src/test/results/clientpositive/tez/subquery_exists.q.out +++ b/ql/src/test/results/clientpositive/tez/subquery_exists.q.out @@ -66,7 +66,7 @@ STAGE PLANS: Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Semi Join 0 to 1 condition expressions: diff --git a/ql/src/test/results/clientpositive/tez/subquery_in.q.out b/ql/src/test/results/clientpositive/tez/subquery_in.q.out index ae24eab..d983c11 100644 --- a/ql/src/test/results/clientpositive/tez/subquery_in.q.out +++ b/ql/src/test/results/clientpositive/tez/subquery_in.q.out @@ -159,7 +159,7 @@ STAGE PLANS: value expressions: value (type: string) Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Semi Join 0 to 1 condition expressions: @@ -276,7 +276,7 @@ STAGE PLANS: Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Semi Join 0 to 1 condition expressions: @@ -439,7 +439,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reducer 4 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Semi Join 0 to 1 condition expressions: @@ -545,7 +545,7 @@ STAGE PLANS: value expressions: p_mfgr (type: string), p_size (type: int) Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator 
condition map: Left Semi Join 0 to 1 condition expressions: @@ -714,7 +714,7 @@ STAGE PLANS: Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Semi Join 0 to 1 condition expressions: @@ -901,7 +901,7 @@ STAGE PLANS: Statistics: Num rows: 432 Data size: 3024 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -917,7 +917,7 @@ STAGE PLANS: value expressions: _col0 (type: int), _col3 (type: int) Reducer 3 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Semi Join 0 to 1 condition expressions: diff --git a/ql/src/test/results/clientpositive/tez/tez_bmj_schema_evolution.q.out b/ql/src/test/results/clientpositive/tez/tez_bmj_schema_evolution.q.out index 4682a05..aec18a8 100644 --- a/ql/src/test/results/clientpositive/tez/tez_bmj_schema_evolution.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_bmj_schema_evolution.q.out @@ -113,6 +113,8 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) outputColumnNames: _col0, _col1 + input vertices: + 1 Map 1 Statistics: Num rows: 550 Data size: 28771 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/tez_dml.q.out b/ql/src/test/results/clientpositive/tez/tez_dml.q.out index a6c45d5..9b7f564 100644 --- a/ql/src/test/results/clientpositive/tez/tez_dml.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_dml.q.out @@ -443,8 +443,6 @@ STAGE DEPENDENCIES: STAGE PLANS: Stage: Stage-1 Tez - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 @@ -456,24 +454,14 @@ STAGE PLANS: expressions: value (type: string), cnt (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 309 Data size: 2718 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: bigint) - sort order: + - Map-reduce partition columns: _col1 (type: bigint) + File Output Operator + compressed: false Statistics: Num rows: 309 Data size: 2718 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: bigint) - Reducer 2 - Reduce Operator Tree: - Extract - Statistics: Num rows: 309 Data size: 2718 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 309 Data size: 2718 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.tmp_src_part + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.tmp_src_part Stage: Stage-2 Dependency Collection @@ -529,314 +517,314 @@ POSTHOOK: Input: default@tmp_src_part@d=4 POSTHOOK: Input: default@tmp_src_part@d=5 #### A masked pattern was here #### val_490 1 -val_289 1 -val_291 1 -val_292 1 -val_296 1 -val_180 1 -val_30 1 -val_302 1 -val_305 1 -val_306 1 -val_491 1 -val_308 1 -val_96 1 -val_310 1 -val_178 1 -val_315 1 -val_177 1 -val_493 1 -val_170 1 -val_494 1 -val_495 1 -val_323 1 -val_496 1 -val_17 1 -val_33 1 -val_497 1 -val_332 1 -val_111 1 
-val_335 1 -val_336 1 -val_338 1 -val_339 1 -val_34 1 -val_341 1 -val_11 1 -val_85 1 -val_345 1 -val_168 1 -val_166 1 -val_351 1 -val_53 1 -val_356 1 -val_360 1 -val_362 1 -val_364 1 -val_365 1 -val_366 1 -val_54 1 -val_368 1 -val_163 1 -val_57 1 -val_373 1 -val_374 1 -val_375 1 -val_377 1 -val_378 1 -val_379 1 -val_86 1 -val_162 1 -val_386 1 -val_389 1 -val_392 1 -val_393 1 -val_394 1 -val_64 1 -val_160 1 -val_65 1 -val_66 1 -val_4 1 -val_400 1 -val_158 1 -val_402 1 -val_157 1 -val_87 1 -val_156 1 -val_407 1 -val_155 1 -val_41 1 -val_411 1 -val_69 1 -val_105 1 -val_153 1 -val_418 1 -val_419 1 -val_9 1 -val_421 1 -val_74 1 -val_427 1 -val_10 1 -val_43 1 -val_150 1 -val_145 1 -val_432 1 -val_435 1 -val_436 1 -val_437 1 -val_143 1 -val_77 1 -val_44 1 -val_443 1 -val_444 1 -val_446 1 -val_448 1 -val_192 1 -val_190 1 -val_194 1 -val_449 1 -val_196 1 -val_452 1 -val_19 1 -val_2 1 -val_20 1 -val_453 1 -val_201 1 -val_202 1 -val_136 1 -val_455 1 -val_457 1 -val_189 1 -val_78 1 -val_8 1 -val_214 1 -val_460 1 -val_80 1 -val_218 1 -val_82 1 -val_133 1 -val_222 1 -val_467 1 -val_131 1 -val_226 1 -val_228 1 -val_126 1 -val_186 1 -val_47 1 -val_235 1 -val_470 1 -val_472 1 -val_475 1 -val_477 1 -val_241 1 -val_92 1 -val_244 1 -val_247 1 -val_248 1 -val_249 1 -val_252 1 -val_479 1 -val_116 1 -val_257 1 -val_258 1 -val_481 1 -val_260 1 -val_262 1 -val_263 1 -val_482 1 -val_266 1 -val_27 1 -val_483 1 -val_183 1 -val_274 1 -val_275 1 -val_181 1 -val_484 1 -val_28 1 -val_485 1 -val_487 1 -val_114 1 -val_283 1 -val_284 1 -val_285 1 -val_286 1 val_287 1 -val_84 2 -val_95 2 -val_97 2 -val_98 2 -val_100 2 -val_103 2 -val_104 2 -val_113 2 -val_118 2 -val_12 2 -val_120 2 -val_125 2 -val_129 2 -val_134 2 -val_137 2 -val_146 2 -val_149 2 -val_15 2 -val_152 2 -val_164 2 -val_165 2 -val_172 2 -val_174 2 -val_175 2 -val_176 2 -val_179 2 -val_18 2 -val_191 2 -val_195 2 -val_197 2 -val_200 2 -val_203 2 -val_205 2 -val_207 2 -val_209 2 -val_213 2 -val_216 2 -val_217 2 -val_219 2 -val_221 2 -val_223 2 -val_224 2 -val_229 2 -val_233 2 -val_237 2 -val_238 2 -val_239 2 -val_24 2 -val_242 2 -val_255 2 -val_256 2 -val_26 2 -val_265 2 -val_272 2 -val_278 2 -val_280 2 -val_281 2 -val_282 2 -val_288 2 -val_307 2 -val_309 2 -val_317 2 -val_321 2 -val_322 2 -val_325 2 -val_331 2 -val_333 2 -val_342 2 -val_344 2 -val_353 2 -val_367 2 -val_37 2 -val_382 2 -val_395 2 -val_397 2 -val_399 2 -val_404 2 -val_413 2 -val_414 2 -val_42 2 -val_424 2 -val_429 2 -val_439 2 -val_458 2 -val_459 2 -val_462 2 -val_463 2 -val_478 2 -val_492 2 -val_51 2 -val_58 2 -val_67 2 -val_72 2 -val_76 2 +val_286 1 +val_285 1 +val_284 1 +val_283 1 +val_114 1 +val_487 1 +val_485 1 +val_28 1 +val_484 1 +val_181 1 +val_275 1 +val_274 1 +val_183 1 +val_483 1 +val_27 1 +val_266 1 +val_482 1 +val_263 1 +val_262 1 +val_260 1 +val_481 1 +val_258 1 +val_257 1 +val_116 1 +val_479 1 +val_252 1 +val_249 1 +val_248 1 +val_247 1 +val_244 1 +val_92 1 +val_241 1 +val_477 1 +val_475 1 +val_472 1 +val_470 1 +val_235 1 +val_47 1 +val_186 1 +val_126 1 +val_228 1 +val_226 1 +val_131 1 +val_467 1 +val_222 1 +val_133 1 +val_82 1 +val_218 1 +val_80 1 +val_460 1 +val_214 1 +val_8 1 +val_78 1 +val_189 1 +val_457 1 +val_455 1 +val_136 1 +val_202 1 +val_201 1 +val_453 1 +val_20 1 +val_2 1 +val_19 1 +val_452 1 +val_196 1 +val_449 1 +val_194 1 +val_190 1 +val_192 1 +val_448 1 +val_446 1 +val_444 1 +val_443 1 +val_44 1 +val_77 1 +val_143 1 +val_437 1 +val_436 1 +val_435 1 +val_432 1 +val_145 1 +val_150 1 +val_43 1 +val_10 1 +val_427 1 +val_74 1 +val_421 1 +val_9 1 +val_419 1 +val_418 1 
+val_153 1 +val_105 1 +val_69 1 +val_411 1 +val_41 1 +val_155 1 +val_407 1 +val_156 1 +val_87 1 +val_157 1 +val_402 1 +val_158 1 +val_400 1 +val_4 1 +val_66 1 +val_65 1 +val_160 1 +val_64 1 +val_394 1 +val_393 1 +val_392 1 +val_389 1 +val_386 1 +val_162 1 +val_86 1 +val_379 1 +val_378 1 +val_377 1 +val_375 1 +val_374 1 +val_373 1 +val_57 1 +val_163 1 +val_368 1 +val_54 1 +val_366 1 +val_365 1 +val_364 1 +val_362 1 +val_360 1 +val_356 1 +val_53 1 +val_351 1 +val_166 1 +val_168 1 +val_345 1 +val_85 1 +val_11 1 +val_341 1 +val_34 1 +val_339 1 +val_338 1 +val_336 1 +val_335 1 +val_111 1 +val_332 1 +val_497 1 +val_33 1 +val_17 1 +val_496 1 +val_323 1 +val_495 1 +val_494 1 +val_170 1 +val_493 1 +val_177 1 +val_315 1 +val_178 1 +val_310 1 +val_96 1 +val_308 1 +val_491 1 +val_306 1 +val_305 1 +val_302 1 +val_30 1 +val_180 1 +val_296 1 +val_292 1 +val_291 1 +val_289 1 +val_98 2 +val_97 2 +val_95 2 +val_84 2 val_83 2 -val_396 3 -val_384 3 -val_369 3 +val_76 2 +val_72 2 +val_67 2 +val_58 2 +val_51 2 +val_492 2 +val_478 2 +val_463 2 +val_462 2 +val_459 2 +val_458 2 +val_439 2 +val_429 2 +val_424 2 +val_42 2 +val_414 2 +val_413 2 +val_404 2 +val_399 2 +val_397 2 +val_395 2 +val_382 2 +val_37 2 +val_367 2 +val_353 2 +val_344 2 +val_342 2 +val_333 2 +val_331 2 +val_325 2 +val_322 2 +val_321 2 +val_317 2 +val_309 2 +val_307 2 +val_288 2 +val_282 2 +val_281 2 +val_280 2 +val_278 2 +val_272 2 +val_265 2 +val_26 2 +val_256 2 +val_255 2 +val_242 2 +val_24 2 +val_239 2 +val_238 2 +val_237 2 +val_233 2 +val_229 2 +val_224 2 +val_223 2 +val_221 2 +val_219 2 +val_217 2 +val_216 2 +val_213 2 +val_209 2 +val_207 2 +val_205 2 +val_203 2 +val_200 2 +val_197 2 +val_195 2 +val_191 2 +val_18 2 +val_179 2 +val_176 2 +val_175 2 +val_174 2 +val_172 2 +val_165 2 +val_164 2 +val_152 2 +val_15 2 +val_149 2 +val_146 2 +val_137 2 +val_134 2 +val_129 2 +val_125 2 +val_120 2 +val_12 2 +val_118 2 +val_113 2 +val_104 2 +val_103 2 +val_100 2 val_498 3 -val_35 3 -val_167 3 -val_327 3 -val_318 3 -val_128 3 -val_90 3 -val_466 3 -val_316 3 -val_311 3 -val_454 3 -val_298 3 -val_273 3 -val_187 3 -val_208 3 -val_199 3 -val_193 3 -val_480 3 -val_438 3 -val_431 3 -val_0 3 -val_119 3 -val_70 3 -val_430 3 -val_5 3 -val_417 3 -val_409 3 +val_369 3 +val_384 3 +val_396 3 val_403 3 -val_406 4 -val_489 4 +val_409 3 +val_417 3 +val_5 3 +val_430 3 +val_70 3 +val_119 3 +val_0 3 +val_431 3 +val_438 3 +val_480 3 +val_193 3 +val_199 3 +val_208 3 +val_187 3 +val_273 3 +val_298 3 +val_454 3 +val_311 3 +val_316 3 +val_466 3 +val_90 3 +val_128 3 +val_318 3 +val_327 3 +val_167 3 +val_35 3 val_468 4 -val_277 4 -val_138 4 +val_489 4 +val_406 4 val_169 4 -val_348 5 -val_230 5 -val_401 5 +val_138 4 +val_277 4 val_469 5 +val_401 5 +val_230 5 +val_348 5 PREHOOK: query: -- multi insert CREATE TABLE even (c int, d string) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out b/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out index b954cb0..e698d72 100644 --- a/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out @@ -63,7 +63,7 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: diff --git a/ql/src/test/results/clientpositive/tez/tez_join_tests.q.out b/ql/src/test/results/clientpositive/tez/tez_join_tests.q.out index 5c1cb98..64285b7 100644 --- 
a/ql/src/test/results/clientpositive/tez/tez_join_tests.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_join_tests.q.out @@ -52,7 +52,7 @@ STAGE PLANS: Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Outer Join0 to 1 condition expressions: @@ -83,7 +83,7 @@ STAGE PLANS: value expressions: _col0 (type: string) Reducer 4 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Right Outer Join0 to 1 condition expressions: diff --git a/ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out b/ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out index 6620403..b600345 100644 --- a/ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out @@ -52,7 +52,7 @@ STAGE PLANS: Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Left Outer Join0 to 1 condition expressions: @@ -83,7 +83,7 @@ STAGE PLANS: value expressions: _col0 (type: string) Reducer 4 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Right Outer Join0 to 1 condition expressions: diff --git a/ql/src/test/results/clientpositive/tez/tez_smb_1.q.out b/ql/src/test/results/clientpositive/tez/tez_smb_1.q.out new file mode 100644 index 0000000..0bb2ec8 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/tez_smb_1.q.out @@ -0,0 +1,689 @@ +PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin +PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab_part +POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab_part +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_part +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD 
+#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_part +PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_part +POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab_part 
PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab +POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab +PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin +PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin +POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: explain +select count(*) from tab s1 join tab s3 on s1.key=s3.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from tab s1 join tab s3 on s1.key=s3.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: s3 + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: s1 + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + 
compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select s1.key, s1.value, s3.value from tab s1 join tab s3 on s1.key=s3.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select s1.key, s1.value, s3.value from tab s1 join tab s3 on s1.key=s3.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +#### A masked pattern was here #### +0 val_0 val_0 +0 val_0 val_0 +0 val_0 val_0 +0 val_0 val_0 +0 val_0 val_0 +0 val_0 val_0 +0 val_0 val_0 +0 val_0 val_0 +0 val_0 val_0 +2 val_2 val_2 +4 val_4 val_4 +8 val_8 val_8 +20 val_20 val_20 +24 val_24 val_24 +24 val_24 val_24 +24 val_24 val_24 +24 val_24 val_24 +26 val_26 val_26 +26 val_26 val_26 +26 val_26 val_26 +26 val_26 val_26 +28 val_28 val_28 +42 val_42 val_42 +42 val_42 val_42 +42 val_42 val_42 +42 val_42 val_42 +44 val_44 val_44 +64 val_64 val_64 +66 val_66 val_66 +80 val_80 val_80 +82 val_82 val_82 +84 val_84 val_84 +84 val_84 val_84 +84 val_84 val_84 +84 val_84 val_84 +86 val_86 val_86 +114 val_114 val_114 +116 val_116 val_116 +118 val_118 val_118 +118 val_118 val_118 +118 val_118 val_118 +118 val_118 val_118 +134 val_134 val_134 +134 val_134 val_134 +134 val_134 val_134 +134 val_134 val_134 +136 val_136 val_136 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +150 val_150 val_150 +152 val_152 val_152 +152 val_152 val_152 +152 val_152 val_152 +152 val_152 val_152 +156 val_156 val_156 +158 val_158 val_158 +170 val_170 val_170 +172 val_172 val_172 +172 val_172 val_172 +172 val_172 val_172 +172 val_172 val_172 +174 val_174 val_174 +174 val_174 val_174 +174 val_174 val_174 +174 val_174 val_174 +176 val_176 val_176 +176 val_176 val_176 +176 val_176 val_176 +176 val_176 val_176 +178 val_178 val_178 +190 val_190 val_190 +192 val_192 val_192 +194 val_194 val_194 +196 val_196 val_196 +200 val_200 val_200 +200 val_200 val_200 +200 val_200 val_200 +200 val_200 val_200 +202 val_202 val_202 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +222 val_222 val_222 +224 val_224 val_224 +224 val_224 val_224 +224 val_224 val_224 +224 val_224 val_224 +226 val_226 val_226 +228 val_228 val_228 +242 val_242 val_242 +242 val_242 val_242 +242 val_242 val_242 +242 val_242 val_242 +244 val_244 val_244 +248 val_248 val_248 +260 val_260 val_260 +262 val_262 val_262 +266 val_266 val_266 +280 val_280 val_280 +280 val_280 val_280 +280 val_280 val_280 +280 val_280 val_280 +282 val_282 val_282 +282 val_282 val_282 +282 val_282 val_282 +282 val_282 val_282 +284 val_284 val_284 +286 val_286 val_286 +288 val_288 val_288 +288 val_288 val_288 +288 val_288 val_288 +288 val_288 val_288 +310 val_310 val_310 +316 val_316 val_316 +316 val_316 val_316 +316 val_316 val_316 +316 val_316 
val_316 +316 val_316 val_316 +316 val_316 val_316 +316 val_316 val_316 +316 val_316 val_316 +316 val_316 val_316 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +332 val_332 val_332 +336 val_336 val_336 +338 val_338 val_338 +356 val_356 val_356 +374 val_374 val_374 +378 val_378 val_378 +392 val_392 val_392 +394 val_394 val_394 +396 val_396 val_396 +396 val_396 val_396 +396 val_396 val_396 +396 val_396 val_396 +396 val_396 val_396 +396 val_396 val_396 +396 val_396 val_396 +396 val_396 val_396 +396 val_396 val_396 +400 val_400 val_400 +402 val_402 val_402 +404 val_404 val_404 +404 val_404 val_404 +404 val_404 val_404 +404 val_404 val_404 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +424 val_424 val_424 +424 val_424 val_424 +424 val_424 val_424 +424 val_424 val_424 +444 val_444 val_444 +446 val_446 val_446 +448 val_448 val_448 +460 val_460 val_460 +462 val_462 val_462 +462 val_462 val_462 +462 val_462 val_462 +462 val_462 val_462 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +482 val_482 val_482 +484 val_484 val_484 +11 val_11 val_11 +15 val_15 val_15 +15 val_15 val_15 +15 val_15 val_15 +15 val_15 val_15 +17 val_17 val_17 +19 val_19 val_19 +33 val_33 val_33 +35 val_35 val_35 +35 val_35 val_35 +35 val_35 val_35 +35 val_35 val_35 +35 val_35 val_35 +35 val_35 val_35 +35 val_35 val_35 +35 val_35 val_35 +35 val_35 val_35 +37 val_37 val_37 +37 val_37 val_37 +37 val_37 val_37 +37 val_37 val_37 +51 val_51 val_51 +51 val_51 val_51 +51 val_51 val_51 +51 val_51 val_51 +53 val_53 val_53 +57 val_57 val_57 +77 val_77 val_77 +95 val_95 val_95 +95 val_95 val_95 +95 val_95 val_95 +95 val_95 val_95 +97 val_97 val_97 +97 val_97 val_97 +97 val_97 val_97 +97 val_97 val_97 +103 val_103 val_103 +103 val_103 val_103 +103 val_103 val_103 +103 val_103 val_103 +105 val_105 val_105 +125 val_125 val_125 +125 val_125 val_125 +125 val_125 val_125 +125 val_125 val_125 +129 val_129 val_129 +129 val_129 val_129 +129 val_129 val_129 +129 val_129 val_129 +143 val_143 val_143 +145 val_145 val_145 +149 val_149 val_149 +149 val_149 val_149 +149 val_149 val_149 +149 val_149 val_149 +163 val_163 val_163 +165 val_165 val_165 +165 val_165 val_165 +165 val_165 val_165 +165 val_165 val_165 +167 val_167 val_167 +167 val_167 val_167 +167 val_167 val_167 +167 val_167 val_167 +167 val_167 val_167 +167 val_167 val_167 +167 val_167 val_167 +167 val_167 val_167 +167 val_167 val_167 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 
+169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +181 val_181 val_181 +183 val_183 val_183 +187 val_187 val_187 +187 val_187 val_187 +187 val_187 val_187 +187 val_187 val_187 +187 val_187 val_187 +187 val_187 val_187 +187 val_187 val_187 +187 val_187 val_187 +187 val_187 val_187 +189 val_189 val_189 +213 val_213 val_213 +213 val_213 val_213 +213 val_213 val_213 +213 val_213 val_213 +217 val_217 val_217 +217 val_217 val_217 +217 val_217 val_217 +217 val_217 val_217 +219 val_219 val_219 +219 val_219 val_219 +219 val_219 val_219 +219 val_219 val_219 +233 val_233 val_233 +233 val_233 val_233 +233 val_233 val_233 +233 val_233 val_233 +235 val_235 val_235 +237 val_237 val_237 +237 val_237 val_237 +237 val_237 val_237 +237 val_237 val_237 +239 val_239 val_239 +239 val_239 val_239 +239 val_239 val_239 +239 val_239 val_239 +255 val_255 val_255 +255 val_255 val_255 +255 val_255 val_255 +255 val_255 val_255 +257 val_257 val_257 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +275 val_275 val_275 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +291 val_291 val_291 +305 val_305 val_305 +307 val_307 val_307 +307 val_307 val_307 +307 val_307 val_307 +307 val_307 val_307 +309 val_309 val_309 +309 val_309 val_309 +309 val_309 val_309 +309 val_309 val_309 +321 val_321 val_321 +321 val_321 val_321 +321 val_321 val_321 +321 val_321 val_321 +323 val_323 val_323 +325 val_325 val_325 +325 val_325 val_325 +325 val_325 val_325 +325 val_325 val_325 +327 val_327 val_327 +327 val_327 val_327 +327 val_327 val_327 +327 val_327 val_327 +327 val_327 val_327 +327 val_327 val_327 +327 val_327 val_327 +327 val_327 val_327 +327 val_327 val_327 +341 val_341 val_341 +345 val_345 val_345 +365 val_365 val_365 +367 val_367 val_367 +367 val_367 val_367 +367 val_367 val_367 +367 val_367 val_367 +369 val_369 val_369 +369 val_369 val_369 +369 val_369 val_369 +369 val_369 val_369 +369 val_369 val_369 +369 val_369 val_369 +369 val_369 val_369 +369 val_369 val_369 +369 val_369 val_369 +389 val_389 val_389 +411 val_411 val_411 +413 val_413 val_413 +413 val_413 val_413 +413 val_413 val_413 +413 val_413 val_413 +417 val_417 val_417 +417 val_417 val_417 +417 val_417 val_417 +417 val_417 val_417 +417 val_417 val_417 +417 val_417 val_417 +417 val_417 val_417 +417 val_417 val_417 +417 val_417 val_417 +419 val_419 val_419 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +435 val_435 val_435 +437 val_437 val_437 +439 val_439 val_439 +439 val_439 val_439 +439 val_439 val_439 +439 val_439 val_439 +453 val_453 val_453 +455 val_455 val_455 +457 val_457 val_457 +459 val_459 val_459 +459 val_459 val_459 +459 val_459 val_459 +459 val_459 val_459 +475 val_475 val_475 +477 val_477 val_477 +479 val_479 val_479 +491 val_491 val_491 +493 val_493 val_493 +495 val_495 val_495 +497 val_497 val_497 
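The golden output above comes from joining the bucketed, sorted table tab to itself; the Merge Join Operator in the plan is the SMB (sort-merge-bucket) conversion this test exercises. A minimal sketch of the setup that produces that plan follows — the session settings are assumptions, since the .q.out golden file does not record the test's set commands (the names are standard Hive configuration properties), while the DDL and query are taken verbatim from the output above:

    -- assumed session settings for the SMB-on-Tez path; not shown in the golden file
    set hive.execution.engine=tez;
    set hive.enforce.bucketing=true;
    set hive.enforce.sorting=true;
    set hive.auto.convert.sortmerge.join=true;

    -- bucketed and sorted on the join key, matching tab's DDL above
    CREATE TABLE tab (key int, value string)
      PARTITIONED BY (ds string)
      CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS
      STORED AS TEXTFILE;

    -- a self-join on the bucketing key can then be planned as a single-vertex
    -- Merge Join Operator instead of a shuffle join across two map vertices
    EXPLAIN
    SELECT count(*) FROM tab s1 JOIN tab s3 ON s1.key = s3.key;

Because both sides share the same bucketing and sort order on key, the plan above reads both aliases in one Map vertex and merges the pre-sorted buckets directly, which is why no SIMPLE_EDGE shuffle appears between the scans and the join.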
+PREHOOK: query: select count(*) from tab s2 +PREHOOK: type: QUERY +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from tab s2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +#### A masked pattern was here #### +242 diff --git a/ql/src/test/results/clientpositive/tez/tez_smb_main.q.out b/ql/src/test/results/clientpositive/tez/tez_smb_main.q.out new file mode 100644 index 0000000..1f4e305 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/tez_smb_main.q.out @@ -0,0 +1,1288 @@ +PREHOOK: query: explain +select * from src a join src1 b on a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from src a join src1 b on a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} {VALUE._col0} + 1 {KEY.reducesinkkey0} {VALUE._col0} + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from src a join src1 b on a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Input: default@src1 +#### A masked pattern was here #### +POSTHOOK: query: select * from src a join src1 b on a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Input: default@src1 +#### A masked pattern was here #### +128 val_128 128 +128 val_128 128 +128 val_128 
128 +146 val_146 146 val_146 +146 val_146 146 val_146 +150 val_150 150 val_150 +213 val_213 213 val_213 +213 val_213 213 val_213 +224 val_224 224 +224 val_224 224 +238 val_238 238 val_238 +238 val_238 238 val_238 +255 val_255 255 val_255 +255 val_255 255 val_255 +273 val_273 273 val_273 +273 val_273 273 val_273 +273 val_273 273 val_273 +278 val_278 278 val_278 +278 val_278 278 val_278 +311 val_311 311 val_311 +311 val_311 311 val_311 +311 val_311 311 val_311 +369 val_369 369 +369 val_369 369 +369 val_369 369 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +401 val_401 401 val_401 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +406 val_406 406 val_406 +66 val_66 66 val_66 +98 val_98 98 val_98 +98 val_98 98 val_98 +PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin +PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab_part +POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab_part +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_part +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: 
type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket21.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_part +PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_part +POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab +POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab +PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +PREHOOK: type: QUERY +PREHOOK: Input: 
default@srcbucket_mapjoin +PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin +POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: explain +select count(*) +from tab a join tab_part b on a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) +from tab a join tab_part b on a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (CUSTOM_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + input vertices: + 0 Map 3 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) +from tab a join tab_part b on a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tab 
+PREHOOK: Input: default@tab@ds=2008-04-08 +PREHOOK: Input: default@tab_part +PREHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) +from tab a join tab_part b on a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +POSTHOOK: Input: default@tab_part +POSTHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +480 +PREHOOK: query: explain +select count (*) +from tab a join tab_part b on a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count (*) +from tab a join tab_part b on a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (CUSTOM_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + input vertices: + 0 Map 3 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) +from tab a join tab_part b on a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +PREHOOK: Input: default@tab_part +PREHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) +from tab a join tab_part b on a.key = b.key 
+POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +POSTHOOK: Input: default@tab_part +POSTHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +480 +PREHOOK: query: explain +select count (*) +from tab a join tab_part b on a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count (*) +from tab a join tab_part b on a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (CUSTOM_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 key (type: int) + 1 key (type: int) + input vertices: + 0 Map 3 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) +from tab a join tab_part b on a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +PREHOOK: Input: default@tab_part +PREHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) +from tab a join tab_part b on a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +POSTHOOK: Input: default@tab_part +POSTHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +480 +PREHOOK: query: explain 
select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (CUSTOM_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {value} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col1 + input vertices: + 0 Map 4 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col1 (type: string) + 1 value (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and value is not null) (type: boolean) + Statistics: Num rows: 61 Data size: 646 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 61 Data size: 646 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + 
Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +PREHOOK: Input: default@tab_part +PREHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +POSTHOOK: Input: default@tab_part +POSTHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +40 +PREHOOK: query: explain select count(*) from tab a join tab_part b on a.value = b.value +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from tab a join tab_part b on a.value = b.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from tab a join tab_part b on a.value = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +PREHOOK: Input: default@tab_part +PREHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from tab a join tab_part b on a.value = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +POSTHOOK: Input: default@tab_part +POSTHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +480 +PREHOOK: query: explain +select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key +UNION ALL +select s2.key as key, s2.value as value from tab s2 +) a join tab_part b on (a.key = b.key) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key +UNION ALL +select s2.key as key, s2.value as value from tab s2 +) a join tab_part b on (a.key = b.key) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 5 <- Union 6 (CONTAINS) + Map 7 <- Union 6 (CONTAINS) + Reducer 3 <- Map 2 (SIMPLE_EDGE), Union 6 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: s3 + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Map Operator Tree: + TableScan + alias: s1 + Filter Operator + predicate: key is not null (type: boolean) + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Map 7 + Map Operator Tree: + TableScan + alias: s2 + Filter Operator + predicate: key is not null (type: boolean) + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 279 Data size: 2963 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 279 Data size: 2963 Basic stats: COMPLETE Column stats: NONE + 
Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Union 6 + Vertex: Union 6 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain select count(*) from tab a join tab_part b on a.value = b.value +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from tab a join tab_part b on a.value = b.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 value (type: string) + 1 value (type: string) + input vertices: + 0 Map 3 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 
Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from tab a join tab_part b on a.value = b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +PREHOOK: Input: default@tab_part +PREHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from tab a join tab_part b on a.value = b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +POSTHOOK: Input: default@tab_part +POSTHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +480 +PREHOOK: query: explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value +PREHOOK: type: QUERY +POSTHOOK: query: explain select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (CUSTOM_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {value} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col1 + input vertices: + 0 Map 4 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col1 (type: string) + 1 value (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: string) + sort order: + + Map-reduce partition columns: value (type: string) + Statistics: Num rows: 13 Data size: 99 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key is not null and value is not null) (type: boolean) + Statistics: Num rows: 61 Data size: 646 Basic stats: COMPLETE Column stats: NONE + Reduce Output 
Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 61 Data size: 646 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +PREHOOK: Input: default@tab_part +PREHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from tab a join tab_part b on a.key = b.key join src1 c on a.value = c.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +POSTHOOK: Input: default@tab_part +POSTHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +40 +PREHOOK: query: explain +select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key +UNION ALL +select s2.key as key, s2.value as value from tab s2 +) a join tab_part b on (a.key = b.key) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select count(*) from (select s1.key as key, s1.value as value from tab s1 join tab s3 on s1.key=s3.key +UNION ALL +select s2.key as key, s2.value as value from tab s2 +) a join tab_part b on (a.key = b.key) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 3 <- Map 1 (CUSTOM_EDGE), Map 2 (BROADCAST_EDGE), Union 4 (CONTAINS) + Map 6 <- Map 2 (BROADCAST_EDGE), Union 4 (CONTAINS) + Reducer 5 <- Union 4 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: s3 + Statistics: Num rows: 242 Data size: 2566 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 121 Data size: 1283 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 250 Data size: 2656 Basic stats: 
COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Map 3 + Map Operator Tree: + TableScan + alias: s1 + Filter Operator + predicate: key is not null (type: boolean) + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} + 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col0 + input vertices: + 1 Map 1 + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 key (type: int) + input vertices: + 1 Map 2 + Select Operator + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Map 6 + Map Operator Tree: + TableScan + alias: s2 + Filter Operator + predicate: key is not null (type: boolean) + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col0 (type: int) + 1 key (type: int) + input vertices: + 1 Map 2 + Select Operator + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: bigint) + Reducer 5 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Union 4 + Vertex: Union 4 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git a/ql/src/test/results/clientpositive/tez/tez_union.q.out b/ql/src/test/results/clientpositive/tez/tez_union.q.out index eff5b5e..e1ea884 100644 --- a/ql/src/test/results/clientpositive/tez/tez_union.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_union.q.out @@ -36,6 +36,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0, _col1 + input vertices: + 1 Map 3 Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 @@ -218,7 +220,7 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: string) Reducer 3 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: @@ -355,6 +357,8 @@ STAGE PLANS: 0 key (type: string) 1 _col0 (type: string) outputColumnNames: _col0, _col5 + input vertices: + 0 Map 1 Select Operator expressions: _col0 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1 @@ -383,6 +387,8 @@ STAGE PLANS: 0 key (type: string) 1 _col0 (type: string) outputColumnNames: _col0, _col5 + input vertices: + 0 Map 1 Select Operator expressions: _col0 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1 @@ -535,6 
+541,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0 + input vertices: + 1 Map 5 Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 @@ -551,6 +559,9 @@ STAGE PLANS: 1 _col0 (type: string) 2 key (type: string) outputColumnNames: _col0, _col5, _col6 + input vertices: + 0 Map 1 + 2 Map 8 Select Operator expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 @@ -600,6 +611,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0 + input vertices: + 1 Map 6 Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 @@ -616,6 +629,9 @@ STAGE PLANS: 1 _col0 (type: string) 2 key (type: string) outputColumnNames: _col0, _col5, _col6 + input vertices: + 0 Map 1 + 2 Map 8 Select Operator expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 @@ -662,6 +678,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0 + input vertices: + 1 Map 10 Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 @@ -678,6 +696,9 @@ STAGE PLANS: 1 _col0 (type: string) 2 key (type: string) outputColumnNames: _col0, _col5, _col6 + input vertices: + 0 Map 1 + 2 Map 8 Select Operator expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 @@ -883,6 +904,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0 + input vertices: + 1 Map 3 Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 @@ -924,6 +947,8 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) outputColumnNames: _col0 + input vertices: + 1 Map 5 Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 @@ -1057,6 +1082,8 @@ STAGE PLANS: 0 _col0 (type: string) 1 key (type: string) outputColumnNames: _col0, _col1, _col2, _col3 + input vertices: + 1 Map 3 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 @@ -1100,6 +1127,8 @@ STAGE PLANS: 0 _col0 (type: string) 1 key (type: string) outputColumnNames: _col0, _col1, _col2, _col3 + input vertices: + 1 Map 3 Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 @@ -1167,6 +1196,8 @@ STAGE PLANS: 0 _col0 (type: string) 1 key (type: string) outputColumnNames: _col0, _col2 + input vertices: + 0 Union 2 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col2 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out b/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out new file mode 100644 index 0000000..59a6e57 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out @@ -0,0 +1,326 @@ +PREHOOK: query: CREATE TABLE x +( +u bigint, +t string, +st string +) +PARTITIONED BY (date string) +STORED AS ORC +TBLPROPERTIES ("orc.compress"="ZLIB") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@x +POSTHOOK: query: CREATE TABLE x +( +u bigint, +t string, +st string +) +PARTITIONED BY (date string) +STORED AS ORC +TBLPROPERTIES ("orc.compress"="ZLIB") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default 
+POSTHOOK: Output: default@x +PREHOOK: query: CREATE TABLE y +( +u bigint +) +PARTITIONED BY (date string) +STORED AS ORC +TBLPROPERTIES ("orc.compress"="ZLIB") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@y +POSTHOOK: query: CREATE TABLE y +( +u bigint +) +PARTITIONED BY (date string) +STORED AS ORC +TBLPROPERTIES ("orc.compress"="ZLIB") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@y +PREHOOK: query: CREATE TABLE z +( +u bigint +) +PARTITIONED BY (date string) +STORED AS ORC +TBLPROPERTIES ("orc.compress"="ZLIB") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@z +POSTHOOK: query: CREATE TABLE z +( +u bigint +) +PARTITIONED BY (date string) +STORED AS ORC +TBLPROPERTIES ("orc.compress"="ZLIB") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@z +PREHOOK: query: CREATE TABLE v +( +t string, +st string, +id int +) +STORED AS ORC +TBLPROPERTIES ("orc.compress"="ZLIB") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@v +POSTHOOK: query: CREATE TABLE v +( +t string, +st string, +id int +) +STORED AS ORC +TBLPROPERTIES ("orc.compress"="ZLIB") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v +PREHOOK: query: EXPLAIN +SELECT o.u, n.u +FROM +( +SELECT m.u, Min(date) as ft +FROM +( +SELECT u, date FROM x WHERE date < '2014-09-02' +UNION ALL +SELECT u, date FROM y WHERE date < '2014-09-02' +UNION ALL +SELECT u, date FROM z WHERE date < '2014-09-02' +) m +GROUP BY m.u +) n +LEFT OUTER JOIN +( +SELECT x.u +FROM x +JOIN v +ON (x.t = v.t AND x.st <=> v.st) +WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03' +GROUP BY x.u +) o +ON n.u = o.u +WHERE n.u <> 0 AND n.ft <= '2014-09-02' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT o.u, n.u +FROM +( +SELECT m.u, Min(date) as ft +FROM +( +SELECT u, date FROM x WHERE date < '2014-09-02' +UNION ALL +SELECT u, date FROM y WHERE date < '2014-09-02' +UNION ALL +SELECT u, date FROM z WHERE date < '2014-09-02' +) m +GROUP BY m.u +) n +LEFT OUTER JOIN +( +SELECT x.u +FROM x +JOIN v +ON (x.t = v.t AND x.st <=> v.st) +WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03' +GROUP BY x.u +) o +ON n.u = o.u +WHERE n.u <> 0 AND n.ft <= '2014-09-02' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 10 <- Union 7 (CONTAINS) + Map 6 <- Union 7 (CONTAINS) + Map 9 <- Union 7 (CONTAINS) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE) + Reducer 8 <- Union 7 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: v + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: t is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: t (type: string), st (type: string) + sort order: ++ + Map-reduce partition columns: t (type: string), st (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 10 + Map 5 + Map 6 + Map 9 + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} + 1 + 
outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: bigint) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Map-reduce partition columns: _col0 (type: bigint) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: bigint) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Map-reduce partition columns: _col0 (type: bigint) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 4 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Outer Join0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} + 1 {KEY.reducesinkkey0} + outputColumnNames: _col0, _col2 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col2 (type: bigint), _col0 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 8 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + keys: KEY._col0 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: (_col1 <= '2014-09-02') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Map-reduce partition columns: _col0 (type: bigint) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Union 7 + Vertex: Union 7 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT o.u, n.u +FROM +( +SELECT m.u, Min(date) as ft +FROM +( +SELECT u, date FROM x WHERE date < '2014-09-02' +UNION ALL +SELECT u, date FROM y WHERE date < '2014-09-02' +UNION ALL +SELECT u, date FROM z WHERE date < '2014-09-02' +) m +GROUP BY m.u +) n +LEFT OUTER JOIN +( +SELECT x.u +FROM x +JOIN v +ON (x.t = v.t AND x.st <=> v.st) +WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03' +GROUP BY x.u +) o +ON n.u = o.u +WHERE n.u <> 0 AND n.ft <= '2014-09-02' +PREHOOK: type: QUERY +PREHOOK: Input: default@v +PREHOOK: Input: default@x +PREHOOK: Input: default@y +PREHOOK: Input: default@z +#### A masked pattern was here #### +POSTHOOK: query: SELECT 
o.u, n.u +FROM +( +SELECT m.u, Min(date) as ft +FROM +( +SELECT u, date FROM x WHERE date < '2014-09-02' +UNION ALL +SELECT u, date FROM y WHERE date < '2014-09-02' +UNION ALL +SELECT u, date FROM z WHERE date < '2014-09-02' +) m +GROUP BY m.u +) n +LEFT OUTER JOIN +( +SELECT x.u +FROM x +JOIN v +ON (x.t = v.t AND x.st <=> v.st) +WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03' +GROUP BY x.u +) o +ON n.u = o.u +WHERE n.u <> 0 AND n.ft <= '2014-09-02' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@v +POSTHOOK: Input: default@x +POSTHOOK: Input: default@y +POSTHOOK: Input: default@z +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/tez/union7.q.out b/ql/src/test/results/clientpositive/tez/union7.q.out index 43f678e..2971029 100644 --- a/ql/src/test/results/clientpositive/tez/union7.q.out +++ b/ql/src/test/results/clientpositive/tez/union7.q.out @@ -92,14 +92,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/tez/update_after_multiple_inserts.q.out b/ql/src/test/results/clientpositive/tez/update_after_multiple_inserts.q.out index cb8e319..a2ad3af 100644 --- a/ql/src/test/results/clientpositive/tez/update_after_multiple_inserts.q.out +++ b/ql/src/test/results/clientpositive/tez/update_after_multiple_inserts.q.out @@ -1,12 +1,12 @@ PREHOOK: query: create table acid_uami(i int, de decimal(5,2), - vc varchar(128)) clustered by (i) into 2 buckets stored as orc + vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_uami POSTHOOK: query: create table acid_uami(i int, de decimal(5,2), - vc varchar(128)) clustered by (i) into 2 buckets stored as orc + vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_uami diff --git a/ql/src/test/results/clientpositive/tez/update_all_non_partitioned.q.out b/ql/src/test/results/clientpositive/tez/update_all_non_partitioned.q.out index fde6d8d..39dd71b 100644 --- a/ql/src/test/results/clientpositive/tez/update_all_non_partitioned.q.out +++ b/ql/src/test/results/clientpositive/tez/update_all_non_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: 
default@acid_uanp -POSTHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_uanp diff --git a/ql/src/test/results/clientpositive/tez/update_all_partitioned.q.out b/ql/src/test/results/clientpositive/tez/update_all_partitioned.q.out index 3fae6a9..533dd88 100644 --- a/ql/src/test/results/clientpositive/tez/update_all_partitioned.q.out +++ b/ql/src/test/results/clientpositive/tez/update_all_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_uap -POSTHOOK: query: create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_uap @@ -85,15 +85,21 @@ POSTHOOK: Input: default@acid_uap@ds=today POSTHOOK: Input: default@acid_uap@ds=tomorrow #### A masked pattern was here #### -1073279343 fred today +-1073279343 oj1YrV5Wa today -1073051226 fred today -1072910839 fred today +-1072081801 dPkN74F7 today -1072081801 fred today -1072076362 fred today -1071480828 fred today +-1071363017 Anj0oF today -1071363017 fred today +-1070883071 0ruyd6Y50JpdGRf6HqD today -1070883071 fred today -1070551679 fred today +-1070551679 iUR3Q today -1069736047 fred today +-1069736047 k17Am8uPHWk02cEf1jet today 762 fred tomorrow 762 fred tomorrow 762 fred tomorrow diff --git a/ql/src/test/results/clientpositive/tez/update_all_types.q.out b/ql/src/test/results/clientpositive/tez/update_all_types.q.out index 36b4684..f1353d0 100644 --- a/ql/src/test/results/clientpositive/tez/update_all_types.q.out +++ b/ql/src/test/results/clientpositive/tez/update_all_types.q.out @@ -10,7 +10,7 @@ PREHOOK: query: create table acid_uat(ti tinyint, s string, vc varchar(128), ch char(36), - b boolean) clustered by (i) into 2 buckets stored as orc + b boolean) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_uat @@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_uat(ti tinyint, s string, vc varchar(128), ch char(36), - b boolean) clustered by (i) into 2 buckets stored as orc + b boolean) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_uat @@ -151,3 +151,37 @@ NULL -5470 -1072076362 1864027286 NULL -5470.0 NULL NULL 1970-01-01 2uLyD28144vk NULL -947 -1070551679 1864027286 NULL -947.0 NULL NULL 1970-01-01 iUR3Q iUR3Q 4KWs6gw7lv2WYd66P false 11 NULL -1069736047 -453772520 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true 1 2 3 4 3.14 6.28 5.99 NULL 2014-09-01 
its a beautiful day in the neighbhorhood a beautiful day for a neighbor wont you be mine true +PREHOOK: query: update acid_uat set + ti = ti * 2, + si = cast(f as int), + d = floor(de) + where s = 'aw724t8c5558x2xneC624' +PREHOOK: type: QUERY +PREHOOK: Input: default@acid_uat +PREHOOK: Output: default@acid_uat +POSTHOOK: query: update acid_uat set + ti = ti * 2, + si = cast(f as int), + d = floor(de) + where s = 'aw724t8c5558x2xneC624' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid_uat +POSTHOOK: Output: default@acid_uat +PREHOOK: query: select * from acid_uat order by i +PREHOOK: type: QUERY +PREHOOK: Input: default@acid_uat +#### A masked pattern was here #### +POSTHOOK: query: select * from acid_uat order by i +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acid_uat +#### A masked pattern was here #### +11 NULL -1073279343 -1595604468 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true +NULL -7382 -1073051226 -1887561756 NULL -7382.0 NULL NULL 1970-01-01 A34p7oRr2WvUJNf A34p7oRr2WvUJNf 4hA4KQj2vD3fI6gX82220d false +11 NULL -1072910839 2048385991 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false +NULL 8373 -1072081801 1864027286 NULL 8373.0 NULL NULL 1970-01-01 dPkN74F7 dPkN74F7 4KWs6gw7lv2WYd66P true +NULL -5470 -1072076362 1864027286 NULL -5470.0 NULL NULL 1970-01-01 2uLyD28144vklju213J1mr 2uLyD28144vklju213J1mr 4KWs6gw7lv2WYd66P true +-102 -51 -1071480828 -1401575336 -51.0 -51.0 -51.0 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true +8 NULL -1071363017 1349676361 8.0 NULL 8.0 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true +NULL -947 -1070551679 1864027286 NULL -947.0 NULL NULL 1970-01-01 iUR3Q iUR3Q 4KWs6gw7lv2WYd66P false +11 NULL -1069736047 -453772520 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true +1 2 3 4 3.14 6.28 5.99 NULL 2014-09-01 its a beautiful day in the neighbhorhood a beautiful day for a neighbor wont you be mine true diff --git a/ql/src/test/results/clientpositive/tez/update_tmp_table.q.out b/ql/src/test/results/clientpositive/tez/update_tmp_table.q.out index 8180f06..3c86a0c 100644 --- a/ql/src/test/results/clientpositive/tez/update_tmp_table.q.out +++ b/ql/src/test/results/clientpositive/tez/update_tmp_table.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc +PREHOOK: query: create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_utt -POSTHOOK: query: create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc +POSTHOOK: query: create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_utt diff --git a/ql/src/test/results/clientpositive/tez/update_two_cols.q.out b/ql/src/test/results/clientpositive/tez/update_two_cols.q.out index 553608f..5132c0c 100644 --- a/ql/src/test/results/clientpositive/tez/update_two_cols.q.out +++ b/ql/src/test/results/clientpositive/tez/update_two_cols.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: 
create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_utc -POSTHOOK: query: create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_utc diff --git a/ql/src/test/results/clientpositive/tez/update_where_no_match.q.out b/ql/src/test/results/clientpositive/tez/update_where_no_match.q.out index afef267..c88899e 100644 --- a/ql/src/test/results/clientpositive/tez/update_where_no_match.q.out +++ b/ql/src/test/results/clientpositive/tez/update_where_no_match.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_wnm -POSTHOOK: query: create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_wnm diff --git a/ql/src/test/results/clientpositive/tez/update_where_non_partitioned.q.out b/ql/src/test/results/clientpositive/tez/update_where_non_partitioned.q.out index 5c79379..9c79235 100644 --- a/ql/src/test/results/clientpositive/tez/update_where_non_partitioned.q.out +++ b/ql/src/test/results/clientpositive/tez/update_where_non_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_uwnp -POSTHOOK: query: create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_uwnp diff --git a/ql/src/test/results/clientpositive/tez/update_where_partitioned.q.out b/ql/src/test/results/clientpositive/tez/update_where_partitioned.q.out index b83c52a..fef0dc0 100644 --- a/ql/src/test/results/clientpositive/tez/update_where_partitioned.q.out +++ b/ql/src/test/results/clientpositive/tez/update_where_partitioned.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +PREHOOK: query: create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acid_uwp -POSTHOOK: query: create table acid_uwp(a int, b varchar(128)) 
partitioned by (ds string) clustered by (a) into 2 buckets stored as orc +POSTHOOK: query: create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acid_uwp diff --git a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out new file mode 100644 index 0000000..a7037f7 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out @@ -0,0 +1,740 @@ +PREHOOK: query: CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: database:default +PREHOOK: Output: default@decimal_date_test +POSTHOOK: query: CREATE TABLE decimal_date_test STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, CAST(CAST((CAST(cint AS BIGINT) *ctinyint) AS TIMESTAMP) AS DATE) AS cdate FROM alltypesorc ORDER BY cdate +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: database:default +POSTHOOK: Output: default@decimal_date_test +PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_date_test + Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (cdate) IN (1969-10-26, 1969-07-14) (type: boolean) + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cdate (type: date) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: date) + sort order: + + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: date) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN 
(CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE)) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE)) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_date_test + Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (not (cdate) IN (1969-10-26, 1969-07-14, 1970-01-21)) (type: boolean) + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_date_test + Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568) (type: boolean) + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cdecimal1 (type: decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(20,10)) + sort order: + + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: 
decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_date_test + Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (not (cdecimal1) IN (2365.8945945946, 881.0135135135, -3367.6517567568)) (type: boolean) + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_date_test + Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: cdate BETWEEN 1969-12-30 AND 1970-01-02 
(type: boolean) + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cdate (type: date) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: date) + sort order: + + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: date) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_date_test + Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: cdate NOT BETWEEN 1968-05-01 AND 1971-09-01 (type: boolean) + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cdate (type: date) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: date) + sort order: + + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: date) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A 
masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_date_test + Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: cdecimal1 BETWEEN -20 AND 45.9918918919 (type: boolean) + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cdecimal1 (type: decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(20,10)) + sort order: + + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(20,10)) + outputColumnNames: _col0 + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: decimal_date_test + Statistics: Num rows: 12288 Data size: 2467616 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 (type: boolean) + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 6144 Data size: 1233808 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + 
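Note: every vertex in the vector_between_in.q.out plans above is annotated "Execution mode: vectorized". A minimal sketch of the session settings and query shapes behind these plans follows; the vector_between_in.q source is not part of this patch, so the two "set" lines are an assumption about how the test is configured, while both queries are taken verbatim from the output above.

  -- Presumed test configuration (real Hive properties; values assumed):
  set hive.execution.engine=tez;
  set hive.vectorized.execution.enabled=true;

  -- IN over a date column: the filter vectorizes on the map side, and the
  -- ORDER BY reducer runs vectorized as well, as the plan above shows.
  EXPLAIN SELECT cdate FROM decimal_date_test
  WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE))
  ORDER BY cdate;

  -- NOT BETWEEN over a decimal column vectorizes the same way, feeding a
  -- vectorized hash GROUP BY for COUNT(*).
  EXPLAIN SELECT COUNT(*) FROM decimal_date_test
  WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351;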
+PREHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +POSTHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) ORDER BY cdate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +1969-07-14 +1969-07-14 +1969-07-14 +1969-07-14 +1969-07-14 +1969-07-14 +1969-07-14 +1969-07-14 +1969-07-14 +1969-07-14 +1969-07-14 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +1969-10-26 +PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE)) +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdate NOT IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE), CAST("1970-01-21" AS DATE)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +5996 +PREHOOK: query: SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +POSTHOOK: query: SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 IN (2365.8945945946, 881.0135135135, -3367.6517567568) ORDER BY cdecimal1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +-3367.6517567568 +-3367.6517567568 +-3367.6517567568 +-3367.6517567568 +-3367.6517567568 +-3367.6517567568 +-3367.6517567568 +881.0135135135 +2365.8945945946 +PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568) +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT IN (2365.8945945946, 881.0135135135, -3367.6517567568) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +9165 +PREHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +POSTHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate BETWEEN CAST("1969-12-30" AS DATE) AND CAST("1970-01-02" AS DATE) ORDER BY cdate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +1969-12-30 +1969-12-30 +1969-12-30 +1969-12-30 +1969-12-30 +1969-12-30 +1969-12-30 +1969-12-30 +1969-12-31 +1969-12-31 +1969-12-31 +1969-12-31 +1969-12-31 +1969-12-31 +1969-12-31 +1969-12-31 +1969-12-31 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 
+1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-01 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +1970-01-02 +PREHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +POSTHOOK: query: SELECT cdate FROM decimal_date_test WHERE cdate NOT BETWEEN CAST("1968-05-01" AS DATE) AND CAST("1971-09-01" AS DATE) ORDER BY cdate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +1968-04-07 +1968-04-09 +1968-04-10 +1968-04-14 +1968-04-16 +1968-04-16 +1968-04-19 +1968-04-23 +1968-04-25 +1968-04-26 +1968-04-27 +1968-04-27 +1968-04-27 +1968-04-29 +1968-04-29 +1968-04-29 +1968-04-29 +1968-04-30 +1971-09-02 +1971-09-04 +1971-09-06 +1971-09-06 +1971-09-06 +1971-09-09 +1971-09-09 +1971-09-15 +1971-09-17 +1971-09-18 +1971-09-21 +1971-09-21 +1971-09-21 +1971-09-22 +1971-09-22 +1971-09-25 +PREHOOK: query: SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +POSTHOOK: query: SELECT cdecimal1 FROM decimal_date_test WHERE cdecimal1 BETWEEN -20 AND 45.9918918919 ORDER BY cdecimal1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +-18.5162162162 +-17.3216216216 +-16.7243243243 +-16.1270270270 +-15.5297297297 +-10.7513513514 +-9.5567567568 +-8.3621621622 +-5.9729729730 +-3.5837837838 +4.1810810811 +4.7783783784 +4.7783783784 +5.3756756757 +5.9729729730 +5.9729729730 +11.3486486486 +11.3486486486 +11.9459459459 +14.9324324324 +19.1135135135 +20.3081081081 +22.1000000000 +24.4891891892 +33.4486486486 +34.6432432432 +40.0189189189 +42.4081081081 +43.0054054054 +44.2000000000 +44.2000000000 +44.7972972973 +45.9918918919 +PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +POSTHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_date_test +#### A masked pattern was here #### +6172 diff --git a/ql/src/test/results/clientpositive/tez/vector_char_4.q.out b/ql/src/test/results/clientpositive/tez/vector_char_4.q.out new file mode 100644 index 0000000..8daf377 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vector_char_4.q.out @@ -0,0 +1,175 @@ +PREHOOK: query: drop table if exists vectortab2k +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists vectortab2k +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists vectortab2korc +PREHOOK: 
type: DROPTABLE +POSTHOOK: query: drop table if exists vectortab2korc +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2korc +PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2k +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2k +POSTHOOK: Output: default@vectortab2korc +POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: 
vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
+PREHOOK: query: drop table if exists char_lazy_binary_columnar
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists char_lazy_binary_columnar
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table char_lazy_binary_columnar(ct char(10), csi char(10), ci char(20), cb char(30), cf char(20), cd char(20), cs char(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@char_lazy_binary_columnar
+POSTHOOK: query: create table char_lazy_binary_columnar(ct char(10), csi char(10), ci char(20), cb char(30), cf char(20), cd char(20), cs char(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@char_lazy_binary_columnar
+PREHOOK: query: explain
+insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: vectortab2korc
+                  Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: CAST( t AS CHAR(10) (type: char(10)), CAST( si AS CHAR(10) (type: char(10)), CAST( i AS CHAR(20) (type: char(20)), CAST( b AS CHAR(30) (type: char(30)), CAST( f AS CHAR(20) (type: char(20)), CAST( d AS CHAR(20) (type: char(20)), CAST( s AS CHAR(50) (type: char(50))
+                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+                    Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                          serde: org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe
+                          name: default.char_lazy_binary_columnar
+            Execution mode: vectorized
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              serde: org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe
+              name: default.char_lazy_binary_columnar
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
diff --git a/ql/src/test/results/clientpositive/tez/vector_char_simple.q.out b/ql/src/test/results/clientpositive/tez/vector_char_simple.q.out
index bac33ec..fe651ca 100644
--- a/ql/src/test/results/clientpositive/tez/vector_char_simple.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_char_simple.q.out
@@ -234,3 +234,109 @@ POSTHOOK: query: drop table char_2
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@char_2
 POSTHOOK: Output: default@char_2
+PREHOOK: query: -- Implicit conversion. Occurs in reduce-side under Tez.
+create table char_3 (
+  field char(12)
+) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@char_3
+POSTHOOK: query: -- Implicit conversion. Occurs in reduce-side under Tez.
+create table char_3 (
+  field char(12)
+) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@char_3
+PREHOOK: query: explain
+insert into table char_3 select cint from alltypesorc limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert into table char_3 select cint from alltypesorc limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+  Stage-3 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: cint (type: int)
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+                    Limit
+                      Number of rows: 10
+                      Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        sort order: 
+                        Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                        value expressions: _col0 (type: int)
+            Execution mode: vectorized
+        Reducer 2
+            Reduce Operator Tree:
+              Select Operator
+                expressions: VALUE._col0 (type: int)
+                outputColumnNames: _col0
+                Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                Limit
+                  Number of rows: 10
+                  Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: CAST( _col0 AS CHAR(12) (type: char(12))
+                    outputColumnNames: _col0
+                    Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                    File Output Operator
+                      compressed: false
+                      Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                      table:
+                          input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                          output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                          serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                          name: default.char_3
+            Execution mode: vectorized
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.char_3
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+PREHOOK: query: insert into table char_3 select cint from alltypesorc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@char_3
+POSTHOOK: query: insert into table char_3 select cint from alltypesorc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@char_3
+POSTHOOK: Lineage: char_3.field EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+PREHOOK: query: drop table char_3
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@char_3
+PREHOOK: Output: default@char_3
+POSTHOOK: query: drop table char_3
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@char_3
+POSTHOOK: Output: default@char_3
diff --git a/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
new file mode 100644
index 0000000..7c89109
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vector_count_distinct.q.out
@@ -0,0 +1,1366 @@
+PREHOOK: query: create table web_sales_txt
+(
+  ws_sold_date_sk int,
+  ws_sold_time_sk int,
+  ws_ship_date_sk int,
+  ws_item_sk int,
+  ws_bill_customer_sk int,
+  ws_bill_cdemo_sk int,
+  ws_bill_hdemo_sk int,
+  ws_bill_addr_sk int,
+  ws_ship_customer_sk int,
+  ws_ship_cdemo_sk int,
+  ws_ship_hdemo_sk int,
+  ws_ship_addr_sk int,
+  ws_web_page_sk int,
+  ws_web_site_sk int,
+  ws_ship_mode_sk int,
+  ws_warehouse_sk int,
+  ws_promo_sk int,
+  ws_order_number int,
+  ws_quantity int,
+  ws_wholesale_cost decimal(7,2),
+  ws_list_price decimal(7,2),
+  ws_sales_price decimal(7,2),
+  ws_ext_discount_amt decimal(7,2),
+  ws_ext_sales_price decimal(7,2),
+  ws_ext_wholesale_cost decimal(7,2),
+  ws_ext_list_price decimal(7,2),
+  ws_ext_tax decimal(7,2),
+  ws_coupon_amt decimal(7,2),
+  ws_ext_ship_cost decimal(7,2),
+  ws_net_paid decimal(7,2),
+  ws_net_paid_inc_tax decimal(7,2),
+  ws_net_paid_inc_ship decimal(7,2),
+  ws_net_paid_inc_ship_tax decimal(7,2),
+  ws_net_profit decimal(7,2)
+)
+row format delimited fields terminated by '|'
+stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@web_sales_txt
+POSTHOOK: query: create table web_sales_txt
+(
+  ws_sold_date_sk int,
+  ws_sold_time_sk int,
+  ws_ship_date_sk int,
+  ws_item_sk int,
+  ws_bill_customer_sk int,
+  ws_bill_cdemo_sk int,
+  ws_bill_hdemo_sk int,
+  ws_bill_addr_sk int,
+  ws_ship_customer_sk int,
+  ws_ship_cdemo_sk int,
+  ws_ship_hdemo_sk int,
+  ws_ship_addr_sk int,
+  ws_web_page_sk int,
+  ws_web_site_sk int,
+  ws_ship_mode_sk int,
+  ws_warehouse_sk int,
+  ws_promo_sk int,
+  ws_order_number int,
+  ws_quantity int,
+  ws_wholesale_cost decimal(7,2),
+  ws_list_price decimal(7,2),
+  ws_sales_price decimal(7,2),
+  ws_ext_discount_amt decimal(7,2),
+  ws_ext_sales_price decimal(7,2),
+  ws_ext_wholesale_cost decimal(7,2),
+  ws_ext_list_price decimal(7,2),
+  ws_ext_tax decimal(7,2),
+  ws_coupon_amt decimal(7,2),
+  ws_ext_ship_cost decimal(7,2),
+  ws_net_paid decimal(7,2),
+  ws_net_paid_inc_tax decimal(7,2),
+  ws_net_paid_inc_ship decimal(7,2),
+  ws_net_paid_inc_ship_tax decimal(7,2),
+  ws_net_profit decimal(7,2)
+)
+row format delimited fields terminated by '|'
+stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@web_sales_txt
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/web_sales_2k' OVERWRITE INTO TABLE web_sales_txt
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@web_sales_txt
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/web_sales_2k' OVERWRITE INTO TABLE web_sales_txt
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@web_sales_txt
+PREHOOK: query: ------------------------------------------------------------------------------------------
+
+create table web_sales
+(
+  ws_sold_date_sk int,
+  ws_sold_time_sk int,
+  ws_ship_date_sk int,
+  ws_item_sk int,
+  ws_bill_customer_sk int,
+  ws_bill_cdemo_sk int,
+  ws_bill_hdemo_sk int,
+  ws_bill_addr_sk int,
+  ws_ship_customer_sk int,
+  ws_ship_cdemo_sk int,
+  ws_ship_hdemo_sk int,
+  ws_ship_addr_sk int,
+  ws_web_page_sk int,
+  ws_ship_mode_sk int,
+  ws_warehouse_sk int,
+  ws_promo_sk int,
+  ws_order_number int,
+  ws_quantity int,
+  ws_wholesale_cost decimal(7,2),
+  ws_list_price decimal(7,2),
+  ws_sales_price decimal(7,2),
+  ws_ext_discount_amt decimal(7,2),
+  ws_ext_sales_price decimal(7,2),
+  ws_ext_wholesale_cost decimal(7,2),
+  ws_ext_list_price decimal(7,2),
+  ws_ext_tax decimal(7,2),
+  ws_coupon_amt decimal(7,2),
+  ws_ext_ship_cost decimal(7,2),
+  ws_net_paid decimal(7,2),
+  ws_net_paid_inc_tax decimal(7,2),
+  ws_net_paid_inc_ship decimal(7,2),
+  ws_net_paid_inc_ship_tax decimal(7,2),
+  ws_net_profit decimal(7,2)
+)
+partitioned by
+(
+  ws_web_site_sk int
+)
+stored as orc
+tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@web_sales
+POSTHOOK: query: ------------------------------------------------------------------------------------------
+
+create table web_sales
+(
+  ws_sold_date_sk int,
+  ws_sold_time_sk int,
+  ws_ship_date_sk int,
+  ws_item_sk int,
+  ws_bill_customer_sk int,
+  ws_bill_cdemo_sk int,
+  ws_bill_hdemo_sk int,
+  ws_bill_addr_sk int,
+  ws_ship_customer_sk int,
+  ws_ship_cdemo_sk int,
+  ws_ship_hdemo_sk int,
+  ws_ship_addr_sk int,
+  ws_web_page_sk int,
+  ws_ship_mode_sk int,
+  ws_warehouse_sk int,
+  ws_promo_sk int,
+  ws_order_number int,
+  ws_quantity int,
+  ws_wholesale_cost decimal(7,2),
+  ws_list_price decimal(7,2),
+  ws_sales_price decimal(7,2),
+  ws_ext_discount_amt decimal(7,2),
+  ws_ext_sales_price decimal(7,2),
+  ws_ext_wholesale_cost decimal(7,2),
+  ws_ext_list_price decimal(7,2),
+  ws_ext_tax decimal(7,2),
+  ws_coupon_amt decimal(7,2),
+  ws_ext_ship_cost decimal(7,2),
+  ws_net_paid decimal(7,2),
+  ws_net_paid_inc_tax decimal(7,2),
+  ws_net_paid_inc_ship decimal(7,2),
+  ws_net_paid_inc_ship_tax decimal(7,2),
+  ws_net_profit decimal(7,2)
+)
+partitioned by
+(
+  ws_web_site_sk int
+)
+stored as orc
+tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@web_sales
+PREHOOK: query: insert overwrite table web_sales
+partition (ws_web_site_sk)
+select ws_sold_date_sk, ws_sold_time_sk, ws_ship_date_sk, ws_item_sk,
+  ws_bill_customer_sk, ws_bill_cdemo_sk, ws_bill_hdemo_sk, ws_bill_addr_sk,
+  ws_ship_customer_sk, ws_ship_cdemo_sk, ws_ship_hdemo_sk, ws_ship_addr_sk,
+  ws_web_page_sk, ws_ship_mode_sk, ws_warehouse_sk, ws_promo_sk, ws_order_number,
+  ws_quantity, ws_wholesale_cost, ws_list_price, ws_sales_price, ws_ext_discount_amt,
+  ws_ext_sales_price, ws_ext_wholesale_cost, ws_ext_list_price, ws_ext_tax,
+  ws_coupon_amt, ws_ext_ship_cost, ws_net_paid, ws_net_paid_inc_tax, ws_net_paid_inc_ship,
+  ws_net_paid_inc_ship_tax, ws_net_profit, ws_web_site_sk from web_sales_txt
+PREHOOK: type: QUERY
+PREHOOK: Input: default@web_sales_txt
+PREHOOK: Output: default@web_sales
+POSTHOOK: query: insert overwrite table web_sales
+partition (ws_web_site_sk)
+select ws_sold_date_sk, ws_sold_time_sk, ws_ship_date_sk, ws_item_sk,
+  ws_bill_customer_sk, ws_bill_cdemo_sk, ws_bill_hdemo_sk, ws_bill_addr_sk,
+  ws_ship_customer_sk, ws_ship_cdemo_sk, ws_ship_hdemo_sk, ws_ship_addr_sk,
+  ws_web_page_sk, ws_ship_mode_sk, ws_warehouse_sk, ws_promo_sk, ws_order_number,
+  ws_quantity, ws_wholesale_cost, ws_list_price, ws_sales_price, ws_ext_discount_amt,
+  ws_ext_sales_price, ws_ext_wholesale_cost, ws_ext_list_price, ws_ext_tax,
+  ws_coupon_amt, ws_ext_ship_cost, ws_net_paid, ws_net_paid_inc_tax, ws_net_paid_inc_ship,
+  ws_net_paid_inc_ship_tax, ws_net_profit, ws_web_site_sk from web_sales_txt
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@web_sales_txt
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=1
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=10
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=11
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=12
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=13
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=14
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=15
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=16
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=17
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=18
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=19
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=2
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=20
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=21
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=22
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=23
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=24
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=25
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=26
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=27
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=28
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=29
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=3
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=30
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=4
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=5
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=6
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=7
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=8
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=9
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_tax
SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_sold_time_sk 
SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: 
web_sales PARTITION(ws_web_site_sk=11).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=12).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, 
comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), 
comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, 
comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_net_paid_inc_tax SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_bill_hdemo_sk SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=15).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=16).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=16).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_net_profit SIMPLE
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, 
type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_customer_sk SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_item_sk SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_web_page_sk SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] 
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=3).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] 
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: 
Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=4).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, 
type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=6).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: 
web_sales PARTITION(ws_web_site_sk=6).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=7).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=8).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, 
comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: 
web_sales PARTITION(ws_web_site_sk=9).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: 
web_sales PARTITION(ws_web_site_sk=9).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +PREHOOK: query: ------------------------------------------------------------------------------------------ + +explain +select count(distinct ws_order_number) from web_sales +PREHOOK: type: QUERY +POSTHOOK: query: ------------------------------------------------------------------------------------------ + +explain +select count(distinct ws_order_number) from web_sales +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: web_sales + Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ws_order_number (type: int) + outputColumnNames: ws_order_number + Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(DISTINCT ws_order_number) + keys: ws_order_number (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(DISTINCT KEY._col0:0._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(distinct ws_order_number) from web_sales +PREHOOK: type: QUERY +PREHOOK: Input: default@web_sales +PREHOOK: Input: default@web_sales@ws_web_site_sk=1 +PREHOOK: Input: default@web_sales@ws_web_site_sk=10 +PREHOOK: Input: default@web_sales@ws_web_site_sk=11 +PREHOOK: Input: default@web_sales@ws_web_site_sk=12 
+PREHOOK: Input: default@web_sales@ws_web_site_sk=13 +PREHOOK: Input: default@web_sales@ws_web_site_sk=14 +PREHOOK: Input: default@web_sales@ws_web_site_sk=15 +PREHOOK: Input: default@web_sales@ws_web_site_sk=16 +PREHOOK: Input: default@web_sales@ws_web_site_sk=17 +PREHOOK: Input: default@web_sales@ws_web_site_sk=18 +PREHOOK: Input: default@web_sales@ws_web_site_sk=19 +PREHOOK: Input: default@web_sales@ws_web_site_sk=2 +PREHOOK: Input: default@web_sales@ws_web_site_sk=20 +PREHOOK: Input: default@web_sales@ws_web_site_sk=21 +PREHOOK: Input: default@web_sales@ws_web_site_sk=22 +PREHOOK: Input: default@web_sales@ws_web_site_sk=23 +PREHOOK: Input: default@web_sales@ws_web_site_sk=24 +PREHOOK: Input: default@web_sales@ws_web_site_sk=25 +PREHOOK: Input: default@web_sales@ws_web_site_sk=26 +PREHOOK: Input: default@web_sales@ws_web_site_sk=27 +PREHOOK: Input: default@web_sales@ws_web_site_sk=28 +PREHOOK: Input: default@web_sales@ws_web_site_sk=29 +PREHOOK: Input: default@web_sales@ws_web_site_sk=3 +PREHOOK: Input: default@web_sales@ws_web_site_sk=30 +PREHOOK: Input: default@web_sales@ws_web_site_sk=4 +PREHOOK: Input: default@web_sales@ws_web_site_sk=5 +PREHOOK: Input: default@web_sales@ws_web_site_sk=6 +PREHOOK: Input: default@web_sales@ws_web_site_sk=7 +PREHOOK: Input: default@web_sales@ws_web_site_sk=8 +PREHOOK: Input: default@web_sales@ws_web_site_sk=9 +#### A masked pattern was here #### +POSTHOOK: query: select count(distinct ws_order_number) from web_sales +POSTHOOK: type: QUERY +POSTHOOK: Input: default@web_sales +POSTHOOK: Input: default@web_sales@ws_web_site_sk=1 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=10 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=11 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=12 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=13 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=14 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=15 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=16 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=17 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=18 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=19 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=2 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=20 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=21 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=22 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=23 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=24 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=25 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=26 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=27 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=28 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=29 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=3 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=30 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=4 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=5 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=6 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=7 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=8 +POSTHOOK: Input: default@web_sales@ws_web_site_sk=9 +#### A masked pattern was here #### +169 diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out index b5f5546..f1b7b8b 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out @@ -113,7 +113,7 @@ 
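[Editor's aside, not part of the patch: the one-line change to vector_decimal_aggregate.q.out just below swaps a leading group key of 0 for NULL; every aggregate value on the row is unchanged. That pattern suggests the fix is in how the vectorized group-by materializes a NULL grouping key, not in the aggregation itself. A minimal sketch of the query shape being pinned down, assuming the decimal columns are named cdecimal1 and cdecimal2 (only "SELECT cint," and the input table decimal_vgby are visible in the hunk, so those column names are a guess):

    -- Group by a nullable int key; per the hunk, the NULL-keyed group
    -- previously surfaced as 0 under vectorized execution.
    SET hive.vectorization.enabled=true;
    SELECT cint,
           COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1),
           COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2)
    FROM decimal_vgby
    GROUP BY cint;

The corrected golden row asserts that the NULL group prints as NULL, matching the non-vectorized path.]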
POSTHOOK: query: SELECT cint, POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### -0 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 3072 11160.71538461538500 -5147.90769230769300 6010604.30769230735360 +NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 3072 11160.71538461538500 -5147.90769230769300 6010604.30769230735360 -3728 6 5831542.2692483780 -3367.6517567568 5817556.0411483778 6 6984454.21109769200000 -4033.445769230769 6967702.86724384584710 -563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 2 -617.56077692307690 -4033.445769230769 -4651.00654615384590 762 2 5831542.2692483780 1531.2194054054 5833073.4886537834 2 6984454.21109769200000 1833.9456923076925 6986288.15678999969250 diff --git a/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out new file mode 100644 index 0000000..1ae5982 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vector_distinct_2.q.out @@ -0,0 +1,1866 @@ +PREHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2korc +PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2k +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2k +POSTHOOK: Output: default@vectortab2korc +POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.d 
SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +PREHOOK: query: explain +select distinct s, t from vectortab2korc +PREHOOK: type: QUERY +POSTHOOK: query: explain +select distinct s, t from vectortab2korc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: vectortab2korc + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: s (type: string), t (type: tinyint) + outputColumnNames: s, t + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: s (type: string), t (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: tinyint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint) + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: tinyint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select distinct s, t from vectortab2korc +PREHOOK: type: QUERY +PREHOOK: Input: 
default@vectortab2korc +#### A masked pattern was here #### +POSTHOOK: query: select distinct s, t from vectortab2korc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### + NULL + -116 + -114 + -113 + -108 + -107 + -104 + -91 + -87 + -86 + -85 + -84 + -83 + -80 + -75 + -72 + -66 + -53 + -46 + -38 + -36 + -33 + -29 + -28 + -25 + -24 + -19 + -16 + -14 + -12 + -10 + -5 + -4 + -2 + 0 + 4 + 5 + 8 + 12 + 16 + 19 + 22 + 23 + 26 + 29 + 31 + 37 + 42 + 46 + 48 + 54 + 57 + 59 + 60 + 62 + 64 + 79 + 84 + 87 + 88 + 90 + 91 + 96 + 97 + 102 + 103 + 105 + 109 + 112 + 113 + 114 + 123 +american history NULL +american history -127 +american history -119 +american history -116 +american history -112 +american history -110 +american history -108 +american history -105 +american history -103 +american history -101 +american history -95 +american history -94 +american history -92 +american history -90 +american history -89 +american history -87 +american history -86 +american history -81 +american history -80 +american history -73 +american history -66 +american history -60 +american history -59 +american history -55 +american history -53 +american history -50 +american history -48 +american history -47 +american history -34 +american history -30 +american history -28 +american history -27 +american history -26 +american history -19 +american history -18 +american history -17 +american history -15 +american history -11 +american history -9 +american history 0 +american history 5 +american history 20 +american history 22 +american history 29 +american history 31 +american history 33 +american history 38 +american history 39 +american history 40 +american history 42 +american history 46 +american history 48 +american history 54 +american history 56 +american history 60 +american history 68 +american history 69 +american history 71 +american history 72 +american history 74 +american history 77 +american history 79 +american history 82 +american history 84 +american history 89 +american history 96 +american history 100 +american history 103 +american history 111 +american history 119 +american history 122 +american history 123 +biology NULL +biology -127 +biology -121 +biology -120 +biology -119 +biology -104 +biology -101 +biology -98 +biology -97 +biology -96 +biology -92 +biology -90 +biology -86 +biology -85 +biology -83 +biology -82 +biology -81 +biology -79 +biology -72 +biology -67 +biology -66 +biology -59 +biology -57 +biology -54 +biology -53 +biology -49 +biology -46 +biology -44 +biology -42 +biology -40 +biology -36 +biology -35 +biology -31 +biology -20 +biology -19 +biology -16 +biology -13 +biology -7 +biology -6 +biology -5 +biology 0 +biology 2 +biology 3 +biology 4 +biology 5 +biology 6 +biology 7 +biology 8 +biology 12 +biology 18 +biology 27 +biology 30 +biology 34 +biology 35 +biology 36 +biology 41 +biology 47 +biology 51 +biology 52 +biology 54 +biology 55 +biology 57 +biology 64 +biology 83 +biology 86 +biology 89 +biology 90 +biology 91 +biology 100 +biology 111 +biology 113 +biology 114 +biology 115 +biology 117 +biology 120 +biology 124 +chemistry NULL +chemistry -127 +chemistry -125 +chemistry -122 +chemistry -117 +chemistry -115 +chemistry -110 +chemistry -109 +chemistry -108 +chemistry -103 +chemistry -101 +chemistry -98 +chemistry -95 +chemistry -91 +chemistry -90 +chemistry -89 +chemistry -81 +chemistry -75 +chemistry -68 +chemistry -61 +chemistry -59 +chemistry -57 +chemistry -56 +chemistry -52 +chemistry -46 +chemistry -44 +chemistry 
-40 +chemistry -37 +chemistry -33 +chemistry -31 +chemistry -30 +chemistry -26 +chemistry -23 +chemistry -8 +chemistry -3 +chemistry -2 +chemistry 3 +chemistry 4 +chemistry 8 +chemistry 14 +chemistry 16 +chemistry 19 +chemistry 25 +chemistry 27 +chemistry 32 +chemistry 37 +chemistry 39 +chemistry 41 +chemistry 46 +chemistry 51 +chemistry 55 +chemistry 64 +chemistry 74 +chemistry 75 +chemistry 78 +chemistry 79 +chemistry 83 +chemistry 88 +chemistry 90 +chemistry 91 +chemistry 96 +chemistry 102 +chemistry 104 +chemistry 110 +chemistry 113 +chemistry 116 +chemistry 124 +chemistry 126 +debate NULL +debate -127 +debate -117 +debate -114 +debate -108 +debate -106 +debate -98 +debate -96 +debate -95 +debate -88 +debate -87 +debate -86 +debate -85 +debate -82 +debate -72 +debate -70 +debate -69 +debate -67 +debate -57 +debate -52 +debate -50 +debate -47 +debate -46 +debate -41 +debate -35 +debate -32 +debate -29 +debate -28 +debate -27 +debate -26 +debate -24 +debate -15 +debate -12 +debate -3 +debate 5 +debate 17 +debate 19 +debate 20 +debate 26 +debate 34 +debate 36 +debate 41 +debate 42 +debate 52 +debate 54 +debate 56 +debate 63 +debate 67 +debate 68 +debate 69 +debate 70 +debate 73 +debate 75 +debate 77 +debate 85 +debate 87 +debate 89 +debate 90 +debate 93 +debate 94 +debate 113 +debate 124 +education NULL +education -127 +education -125 +education -113 +education -111 +education -109 +education -105 +education -101 +education -98 +education -96 +education -93 +education -92 +education -89 +education -87 +education -86 +education -77 +education -72 +education -69 +education -64 +education -61 +education -55 +education -52 +education -44 +education -43 +education -38 +education -37 +education -36 +education -34 +education -28 +education -24 +education -15 +education -13 +education -9 +education 4 +education 12 +education 17 +education 27 +education 33 +education 34 +education 35 +education 38 +education 41 +education 45 +education 46 +education 49 +education 51 +education 53 +education 59 +education 61 +education 66 +education 73 +education 75 +education 77 +education 81 +education 82 +education 85 +education 89 +education 95 +education 102 +education 103 +education 110 +education 111 +education 113 +education 114 +education 116 +education 119 +education 125 +forestry NULL +forestry -126 +forestry -118 +forestry -116 +forestry -109 +forestry -105 +forestry -98 +forestry -95 +forestry -94 +forestry -83 +forestry -81 +forestry -80 +forestry -79 +forestry -77 +forestry -74 +forestry -73 +forestry -68 +forestry -58 +forestry -51 +forestry -50 +forestry -49 +forestry -42 +forestry -34 +forestry -23 +forestry -12 +forestry -11 +forestry -10 +forestry -8 +forestry -3 +forestry -1 +forestry 0 +forestry 7 +forestry 8 +forestry 11 +forestry 12 +forestry 19 +forestry 29 +forestry 31 +forestry 35 +forestry 37 +forestry 39 +forestry 40 +forestry 44 +forestry 45 +forestry 50 +forestry 51 +forestry 52 +forestry 55 +forestry 56 +forestry 71 +forestry 78 +forestry 79 +forestry 87 +forestry 90 +forestry 93 +forestry 94 +forestry 97 +forestry 102 +forestry 106 +forestry 107 +forestry 111 +forestry 115 +forestry 117 +forestry 120 +geology NULL +geology -124 +geology -117 +geology -115 +geology -112 +geology -108 +geology -106 +geology -101 +geology -100 +geology -96 +geology -94 +geology -84 +geology -82 +geology -80 +geology -79 +geology -75 +geology -72 +geology -68 +geology -59 +geology -57 +geology -54 +geology -53 +geology -50 +geology -48 +geology -35 +geology -34 +geology -32 +geology -31 +geology -30 
+geology -23 +geology -21 +geology -18 +geology -16 +geology 1 +geology 4 +geology 5 +geology 6 +geology 9 +geology 14 +geology 18 +geology 21 +geology 23 +geology 26 +geology 28 +geology 31 +geology 33 +geology 37 +geology 38 +geology 49 +geology 58 +geology 72 +geology 82 +geology 84 +geology 86 +geology 89 +geology 92 +geology 93 +geology 95 +geology 100 +geology 101 +geology 102 +geology 116 +geology 121 +geology 124 +geology 127 +history NULL +history -125 +history -124 +history -105 +history -104 +history -103 +history -101 +history -98 +history -91 +history -90 +history -89 +history -79 +history -75 +history -67 +history -61 +history -58 +history -54 +history -42 +history -37 +history -34 +history -26 +history -24 +history -19 +history -18 +history -15 +history -12 +history -8 +history -5 +history -1 +history 1 +history 2 +history 7 +history 14 +history 18 +history 25 +history 50 +history 51 +history 52 +history 57 +history 58 +history 59 +history 61 +history 62 +history 71 +history 72 +history 73 +history 75 +history 78 +history 84 +history 92 +history 95 +history 98 +history 100 +history 108 +history 112 +history 113 +history 114 +history 126 +history 127 +industrial engineering NULL +industrial engineering -124 +industrial engineering -110 +industrial engineering -101 +industrial engineering -98 +industrial engineering -96 +industrial engineering -87 +industrial engineering -85 +industrial engineering -72 +industrial engineering -68 +industrial engineering -65 +industrial engineering -58 +industrial engineering -57 +industrial engineering -53 +industrial engineering -49 +industrial engineering -47 +industrial engineering -43 +industrial engineering -38 +industrial engineering -35 +industrial engineering -31 +industrial engineering -30 +industrial engineering -28 +industrial engineering -22 +industrial engineering -11 +industrial engineering -7 +industrial engineering -6 +industrial engineering -5 +industrial engineering -1 +industrial engineering 3 +industrial engineering 7 +industrial engineering 27 +industrial engineering 29 +industrial engineering 32 +industrial engineering 33 +industrial engineering 36 +industrial engineering 42 +industrial engineering 43 +industrial engineering 48 +industrial engineering 54 +industrial engineering 58 +industrial engineering 59 +industrial engineering 70 +industrial engineering 73 +industrial engineering 78 +industrial engineering 79 +industrial engineering 83 +industrial engineering 95 +industrial engineering 96 +industrial engineering 98 +industrial engineering 99 +industrial engineering 102 +industrial engineering 104 +industrial engineering 105 +industrial engineering 106 +industrial engineering 113 +industrial engineering 126 +joggying NULL +joggying -125 +joggying -121 +joggying -119 +joggying -110 +joggying -101 +joggying -100 +joggying -80 +joggying -79 +joggying -77 +joggying -76 +joggying -73 +joggying -69 +joggying -64 +joggying -62 +joggying -61 +joggying -57 +joggying -55 +joggying -48 +joggying -47 +joggying -43 +joggying -40 +joggying -30 +joggying -27 +joggying -24 +joggying -15 +joggying -14 +joggying -8 +joggying -1 +joggying 13 +joggying 20 +joggying 25 +joggying 26 +joggying 27 +joggying 28 +joggying 37 +joggying 43 +joggying 46 +joggying 48 +joggying 49 +joggying 52 +joggying 57 +joggying 61 +joggying 62 +joggying 69 +joggying 70 +joggying 72 +joggying 74 +joggying 80 +joggying 85 +joggying 87 +joggying 92 +joggying 93 +joggying 94 +joggying 97 +joggying 99 +joggying 104 +joggying 105 +joggying 118 +joggying 119 +joggying 
121 +joggying 123 +joggying 125 +kindergarten NULL +kindergarten -126 +kindergarten -113 +kindergarten -106 +kindergarten -98 +kindergarten -95 +kindergarten -92 +kindergarten -79 +kindergarten -78 +kindergarten -75 +kindergarten -74 +kindergarten -69 +kindergarten -60 +kindergarten -59 +kindergarten -57 +kindergarten -54 +kindergarten -42 +kindergarten -40 +kindergarten -26 +kindergarten -18 +kindergarten -8 +kindergarten 10 +kindergarten 16 +kindergarten 18 +kindergarten 19 +kindergarten 23 +kindergarten 29 +kindergarten 37 +kindergarten 46 +kindergarten 48 +kindergarten 51 +kindergarten 52 +kindergarten 55 +kindergarten 61 +kindergarten 66 +kindergarten 69 +kindergarten 82 +kindergarten 84 +kindergarten 85 +kindergarten 86 +kindergarten 90 +kindergarten 92 +kindergarten 96 +kindergarten 100 +kindergarten 101 +kindergarten 109 +kindergarten 111 +kindergarten 116 +kindergarten 118 +kindergarten 120 +kindergarten 122 +kindergarten 127 +linguistics NULL +linguistics -127 +linguistics -122 +linguistics -113 +linguistics -101 +linguistics -90 +linguistics -89 +linguistics -87 +linguistics -86 +linguistics -78 +linguistics -77 +linguistics -73 +linguistics -70 +linguistics -69 +linguistics -68 +linguistics -67 +linguistics -53 +linguistics -52 +linguistics -41 +linguistics -34 +linguistics -28 +linguistics -22 +linguistics -20 +linguistics -17 +linguistics -16 +linguistics -14 +linguistics -13 +linguistics -12 +linguistics -6 +linguistics -4 +linguistics -2 +linguistics 0 +linguistics 1 +linguistics 3 +linguistics 6 +linguistics 7 +linguistics 10 +linguistics 11 +linguistics 18 +linguistics 33 +linguistics 37 +linguistics 42 +linguistics 44 +linguistics 50 +linguistics 53 +linguistics 64 +linguistics 67 +linguistics 72 +linguistics 73 +linguistics 83 +linguistics 89 +linguistics 93 +linguistics 96 +linguistics 98 +linguistics 100 +linguistics 113 +linguistics 115 +linguistics 123 +linguistics 125 +linguistics 126 +mathematics NULL +mathematics -127 +mathematics -124 +mathematics -122 +mathematics -120 +mathematics -118 +mathematics -117 +mathematics -103 +mathematics -101 +mathematics -100 +mathematics -99 +mathematics -98 +mathematics -95 +mathematics -91 +mathematics -88 +mathematics -81 +mathematics -79 +mathematics -77 +mathematics -75 +mathematics -66 +mathematics -57 +mathematics -52 +mathematics -50 +mathematics -49 +mathematics -46 +mathematics -45 +mathematics -40 +mathematics -33 +mathematics -31 +mathematics -21 +mathematics -19 +mathematics -7 +mathematics 0 +mathematics 3 +mathematics 6 +mathematics 10 +mathematics 22 +mathematics 23 +mathematics 25 +mathematics 32 +mathematics 35 +mathematics 38 +mathematics 39 +mathematics 46 +mathematics 48 +mathematics 50 +mathematics 53 +mathematics 55 +mathematics 56 +mathematics 58 +mathematics 59 +mathematics 62 +mathematics 63 +mathematics 65 +mathematics 76 +mathematics 79 +mathematics 80 +mathematics 82 +mathematics 87 +mathematics 92 +mathematics 98 +mathematics 102 +mathematics 107 +mathematics 111 +mathematics 114 +nap time NULL +nap time -122 +nap time -119 +nap time -115 +nap time -113 +nap time -104 +nap time -102 +nap time -101 +nap time -91 +nap time -85 +nap time -71 +nap time -61 +nap time -54 +nap time -49 +nap time -45 +nap time -41 +nap time -31 +nap time -15 +nap time -6 +nap time -4 +nap time -2 +nap time 0 +nap time 3 +nap time 6 +nap time 16 +nap time 23 +nap time 27 +nap time 31 +nap time 35 +nap time 42 +nap time 51 +nap time 52 +nap time 56 +nap time 62 +nap time 69 +nap time 70 +nap time 73 +nap time 76 +nap time 80 
+nap time 89 +nap time 90 +nap time 92 +nap time 93 +nap time 98 +nap time 103 +nap time 104 +nap time 105 +nap time 107 +nap time 108 +nap time 118 +opthamology NULL +opthamology -122 +opthamology -121 +opthamology -118 +opthamology -111 +opthamology -99 +opthamology -97 +opthamology -91 +opthamology -86 +opthamology -82 +opthamology -79 +opthamology -78 +opthamology -77 +opthamology -75 +opthamology -73 +opthamology -63 +opthamology -59 +opthamology -55 +opthamology -49 +opthamology -48 +opthamology -44 +opthamology -35 +opthamology -33 +opthamology -30 +opthamology -28 +opthamology -27 +opthamology -24 +opthamology -22 +opthamology -18 +opthamology -17 +opthamology -14 +opthamology -8 +opthamology -5 +opthamology 4 +opthamology 5 +opthamology 21 +opthamology 39 +opthamology 41 +opthamology 48 +opthamology 49 +opthamology 52 +opthamology 53 +opthamology 55 +opthamology 68 +opthamology 69 +opthamology 74 +opthamology 76 +opthamology 77 +opthamology 79 +opthamology 81 +opthamology 84 +opthamology 87 +opthamology 88 +opthamology 89 +opthamology 92 +opthamology 96 +opthamology 97 +opthamology 100 +opthamology 104 +opthamology 117 +opthamology 120 +opthamology 122 +opthamology 125 +opthamology 127 +philosophy NULL +philosophy -125 +philosophy -121 +philosophy -119 +philosophy -115 +philosophy -110 +philosophy -105 +philosophy -103 +philosophy -100 +philosophy -99 +philosophy -95 +philosophy -93 +philosophy -92 +philosophy -80 +philosophy -78 +philosophy -77 +philosophy -69 +philosophy -68 +philosophy -61 +philosophy -56 +philosophy -55 +philosophy -53 +philosophy -52 +philosophy -51 +philosophy -50 +philosophy -40 +philosophy -39 +philosophy -27 +philosophy -26 +philosophy -25 +philosophy -17 +philosophy -11 +philosophy 8 +philosophy 20 +philosophy 21 +philosophy 22 +philosophy 29 +philosophy 31 +philosophy 34 +philosophy 38 +philosophy 41 +philosophy 43 +philosophy 45 +philosophy 48 +philosophy 64 +philosophy 67 +philosophy 68 +philosophy 73 +philosophy 83 +philosophy 96 +philosophy 98 +philosophy 104 +philosophy 108 +philosophy 117 +philosophy 118 +philosophy 120 +philosophy 123 +quiet hour NULL +quiet hour -127 +quiet hour -123 +quiet hour -121 +quiet hour -119 +quiet hour -114 +quiet hour -111 +quiet hour -105 +quiet hour -104 +quiet hour -88 +quiet hour -87 +quiet hour -76 +quiet hour -73 +quiet hour -68 +quiet hour -66 +quiet hour -65 +quiet hour -56 +quiet hour -55 +quiet hour -52 +quiet hour -50 +quiet hour -48 +quiet hour -45 +quiet hour -42 +quiet hour -41 +quiet hour -33 +quiet hour -31 +quiet hour -25 +quiet hour -14 +quiet hour -8 +quiet hour -1 +quiet hour 0 +quiet hour 6 +quiet hour 7 +quiet hour 8 +quiet hour 13 +quiet hour 21 +quiet hour 23 +quiet hour 29 +quiet hour 30 +quiet hour 33 +quiet hour 35 +quiet hour 38 +quiet hour 43 +quiet hour 58 +quiet hour 60 +quiet hour 66 +quiet hour 71 +quiet hour 74 +quiet hour 80 +quiet hour 82 +quiet hour 84 +quiet hour 93 +quiet hour 98 +quiet hour 110 +quiet hour 112 +quiet hour 115 +quiet hour 120 +quiet hour 121 +quiet hour 123 +religion NULL +religion -125 +religion -106 +religion -104 +religion -94 +religion -93 +religion -81 +religion -78 +religion -77 +religion -76 +religion -71 +religion -70 +religion -69 +religion -64 +religion -62 +religion -60 +religion -56 +religion -44 +religion -42 +religion -41 +religion -38 +religion -35 +religion -32 +religion -29 +religion -28 +religion -26 +religion -24 +religion -23 +religion -9 +religion -7 +religion -3 +religion 0 +religion 2 +religion 4 +religion 5 +religion 15 +religion 17 
+religion 29 +religion 31 +religion 38 +religion 44 +religion 45 +religion 49 +religion 52 +religion 54 +religion 58 +religion 67 +religion 70 +religion 73 +religion 74 +religion 76 +religion 78 +religion 82 +religion 92 +religion 93 +religion 96 +religion 97 +religion 102 +religion 103 +religion 106 +religion 107 +religion 110 +religion 115 +religion 120 +religion 123 +religion 124 +study skills NULL +study skills -127 +study skills -126 +study skills -117 +study skills -107 +study skills -106 +study skills -100 +study skills -88 +study skills -86 +study skills -82 +study skills -81 +study skills -76 +study skills -73 +study skills -65 +study skills -52 +study skills -36 +study skills -33 +study skills -27 +study skills -26 +study skills -22 +study skills -17 +study skills -14 +study skills -13 +study skills -6 +study skills -5 +study skills -4 +study skills -3 +study skills -1 +study skills 2 +study skills 3 +study skills 18 +study skills 21 +study skills 23 +study skills 25 +study skills 28 +study skills 29 +study skills 30 +study skills 35 +study skills 39 +study skills 40 +study skills 47 +study skills 49 +study skills 50 +study skills 54 +study skills 55 +study skills 58 +study skills 62 +study skills 63 +study skills 66 +study skills 68 +study skills 72 +study skills 77 +study skills 80 +study skills 83 +study skills 92 +study skills 95 +study skills 96 +study skills 101 +study skills 106 +study skills 107 +study skills 110 +study skills 115 +study skills 119 +study skills 123 +topology NULL +topology -122 +topology -116 +topology -106 +topology -105 +topology -102 +topology -98 +topology -96 +topology -88 +topology -86 +topology -78 +topology -74 +topology -71 +topology -60 +topology -58 +topology -57 +topology -50 +topology -44 +topology -42 +topology -41 +topology -36 +topology -32 +topology -31 +topology -25 +topology -21 +topology -5 +topology -1 +topology 7 +topology 11 +topology 13 +topology 14 +topology 18 +topology 26 +topology 30 +topology 38 +topology 41 +topology 42 +topology 47 +topology 50 +topology 52 +topology 54 +topology 55 +topology 58 +topology 59 +topology 61 +topology 63 +topology 67 +topology 69 +topology 71 +topology 80 +topology 81 +topology 83 +topology 86 +topology 87 +topology 94 +topology 105 +topology 107 +topology 119 +topology 121 +topology 127 +undecided NULL +undecided -120 +undecided -118 +undecided -117 +undecided -116 +undecided -115 +undecided -114 +undecided -112 +undecided -105 +undecided -104 +undecided -96 +undecided -93 +undecided -90 +undecided -84 +undecided -83 +undecided -78 +undecided -69 +undecided -62 +undecided -53 +undecided -52 +undecided -51 +undecided -43 +undecided -41 +undecided -29 +undecided -28 +undecided -23 +undecided -19 +undecided -13 +undecided -10 +undecided -8 +undecided 0 +undecided 1 +undecided 7 +undecided 11 +undecided 13 +undecided 14 +undecided 27 +undecided 30 +undecided 33 +undecided 37 +undecided 45 +undecided 47 +undecided 50 +undecided 51 +undecided 56 +undecided 69 +undecided 76 +undecided 95 +undecided 97 +undecided 98 +undecided 111 +undecided 114 +undecided 119 +undecided 123 +undecided 124 +values clariffication NULL +values clariffication -123 +values clariffication -119 +values clariffication -114 +values clariffication -109 +values clariffication -108 +values clariffication -107 +values clariffication -105 +values clariffication -100 +values clariffication -98 +values clariffication -97 +values clariffication -95 +values clariffication -92 +values clariffication -90 +values clariffication -81 
+values clariffication -75 +values clariffication -70 +values clariffication -69 +values clariffication -67 +values clariffication -63 +values clariffication -62 +values clariffication -60 +values clariffication -55 +values clariffication -51 +values clariffication -50 +values clariffication -48 +values clariffication -46 +values clariffication -42 +values clariffication -40 +values clariffication -38 +values clariffication -37 +values clariffication -31 +values clariffication -28 +values clariffication -8 +values clariffication -6 +values clariffication -5 +values clariffication 4 +values clariffication 8 +values clariffication 9 +values clariffication 10 +values clariffication 12 +values clariffication 14 +values clariffication 15 +values clariffication 21 +values clariffication 23 +values clariffication 30 +values clariffication 32 +values clariffication 42 +values clariffication 50 +values clariffication 53 +values clariffication 56 +values clariffication 57 +values clariffication 62 +values clariffication 70 +values clariffication 74 +values clariffication 80 +values clariffication 85 +values clariffication 92 +values clariffication 93 +values clariffication 96 +values clariffication 97 +values clariffication 98 +values clariffication 108 +values clariffication 118 +values clariffication 120 +values clariffication 122 +values clariffication 124 +values clariffication 126 +values clariffication 127 +wind surfing NULL +wind surfing -124 +wind surfing -121 +wind surfing -117 +wind surfing -116 +wind surfing -113 +wind surfing -111 +wind surfing -104 +wind surfing -102 +wind surfing -99 +wind surfing -98 +wind surfing -96 +wind surfing -83 +wind surfing -80 +wind surfing -78 +wind surfing -71 +wind surfing -65 +wind surfing -60 +wind surfing -57 +wind surfing -56 +wind surfing -42 +wind surfing -39 +wind surfing -38 +wind surfing -34 +wind surfing -31 +wind surfing -30 +wind surfing -21 +wind surfing -18 +wind surfing -14 +wind surfing -12 +wind surfing -8 +wind surfing -6 +wind surfing -2 +wind surfing 5 +wind surfing 11 +wind surfing 15 +wind surfing 16 +wind surfing 20 +wind surfing 21 +wind surfing 22 +wind surfing 26 +wind surfing 29 +wind surfing 33 +wind surfing 36 +wind surfing 37 +wind surfing 40 +wind surfing 41 +wind surfing 44 +wind surfing 45 +wind surfing 48 +wind surfing 52 +wind surfing 53 +wind surfing 54 +wind surfing 58 +wind surfing 59 +wind surfing 62 +wind surfing 63 +wind surfing 64 +wind surfing 66 +wind surfing 68 +wind surfing 71 +wind surfing 75 +wind surfing 78 +wind surfing 86 +wind surfing 88 +wind surfing 92 +wind surfing 96 +wind surfing 101 +wind surfing 102 +wind surfing 107 +wind surfing 108 +wind surfing 111 +wind surfing 112 +wind surfing 121 +xylophone band NULL +xylophone band -115 +xylophone band -112 +xylophone band -109 +xylophone band -104 +xylophone band -101 +xylophone band -98 +xylophone band -96 +xylophone band -94 +xylophone band -91 +xylophone band -78 +xylophone band -75 +xylophone band -72 +xylophone band -70 +xylophone band -68 +xylophone band -59 +xylophone band -46 +xylophone band -27 +xylophone band -26 +xylophone band -23 +xylophone band -15 +xylophone band -13 +xylophone band -10 +xylophone band -6 +xylophone band -3 +xylophone band -2 +xylophone band 0 +xylophone band 3 +xylophone band 6 +xylophone band 7 +xylophone band 9 +xylophone band 11 +xylophone band 14 +xylophone band 17 +xylophone band 19 +xylophone band 22 +xylophone band 23 +xylophone band 33 +xylophone band 36 +xylophone band 38 +xylophone band 40 +xylophone band 52 
+xylophone band 60 +xylophone band 62 +xylophone band 77 +xylophone band 79 +xylophone band 84 +xylophone band 85 +xylophone band 88 +xylophone band 92 +xylophone band 94 +xylophone band 107 +xylophone band 108 +xylophone band 112 +xylophone band 118 +xylophone band 122 +xylophone band 123 +xylophone band 125 +xylophone band 127 +yard duty NULL +yard duty -127 +yard duty -115 +yard duty -114 +yard duty -109 +yard duty -103 +yard duty -102 +yard duty -100 +yard duty -98 +yard duty -93 +yard duty -91 +yard duty -88 +yard duty -86 +yard duty -85 +yard duty -82 +yard duty -76 +yard duty -62 +yard duty -61 +yard duty -57 +yard duty -53 +yard duty -51 +yard duty -50 +yard duty -49 +yard duty -48 +yard duty -46 +yard duty -45 +yard duty -44 +yard duty -33 +yard duty -28 +yard duty -27 +yard duty -21 +yard duty -19 +yard duty -17 +yard duty -15 +yard duty -1 +yard duty 8 +yard duty 9 +yard duty 10 +yard duty 15 +yard duty 18 +yard duty 22 +yard duty 25 +yard duty 28 +yard duty 30 +yard duty 34 +yard duty 42 +yard duty 48 +yard duty 53 +yard duty 55 +yard duty 57 +yard duty 60 +yard duty 64 +yard duty 65 +yard duty 78 +yard duty 86 +yard duty 90 +yard duty 102 +yard duty 105 +yard duty 110 +zync studies NULL +zync studies -127 +zync studies -117 +zync studies -116 +zync studies -111 +zync studies -105 +zync studies -103 +zync studies -102 +zync studies -94 +zync studies -90 +zync studies -83 +zync studies -79 +zync studies -78 +zync studies -68 +zync studies -61 +zync studies -58 +zync studies -57 +zync studies -54 +zync studies -52 +zync studies -50 +zync studies -45 +zync studies -40 +zync studies -39 +zync studies -38 +zync studies -37 +zync studies -36 +zync studies -35 +zync studies -34 +zync studies -33 +zync studies -32 +zync studies -27 +zync studies -26 +zync studies -21 +zync studies -20 +zync studies -12 +zync studies 1 +zync studies 7 +zync studies 9 +zync studies 11 +zync studies 15 +zync studies 19 +zync studies 31 +zync studies 37 +zync studies 48 +zync studies 59 +zync studies 63 +zync studies 70 +zync studies 87 +zync studies 90 +zync studies 91 +zync studies 98 +zync studies 99 +zync studies 100 +zync studies 116 +zync studies 120 diff --git a/ql/src/test/results/clientpositive/tez/vector_elt.q.out b/ql/src/test/results/clientpositive/tez/vector_elt.q.out new file mode 100644 index 0000000..d7a46e1 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vector_elt.q.out @@ -0,0 +1,121 @@ +PREHOOK: query: EXPLAIN SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) +FROM alltypesorc +WHERE ctinyint > 0 LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) +FROM alltypesorc +WHERE ctinyint > 0 LIMIT 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: (ctinyint > 0) (type: boolean) + Select Operator + expressions: ((ctinyint % 2) + 1) (type: int), cstring1 (type: string), cint (type: int), elt(((ctinyint % 2) + 1), cstring1, cint) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Limit + Number of rows: 10 + ListSink + +PREHOOK: query: SELECT (ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) +FROM alltypesorc +WHERE ctinyint > 0 LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT 
(ctinyint % 2) + 1, cstring1, cint, elt((ctinyint % 2) + 1, cstring1, cint) +FROM alltypesorc +WHERE ctinyint > 0 LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +2 cvLH6Eat2yFsyy7p 528534767 528534767 +2 cvLH6Eat2yFsyy7p 528534767 528534767 +2 cvLH6Eat2yFsyy7p 528534767 528534767 +2 cvLH6Eat2yFsyy7p 528534767 528534767 +2 cvLH6Eat2yFsyy7p 528534767 528534767 +1 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +2 cvLH6Eat2yFsyy7p 528534767 528534767 +1 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +2 cvLH6Eat2yFsyy7p 528534767 528534767 +1 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +PREHOOK: query: EXPLAIN +SELECT elt(2, 'abc', 'defg'), + elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'), + elt('1', 'abc', 'defg'), + elt(2, 'aa', CAST('2' AS TINYINT)), + elt(2, 'aa', CAST('12345' AS SMALLINT)), + elt(2, 'aa', CAST('123456789012' AS BIGINT)), + elt(2, 'aa', CAST(1.25 AS FLOAT)), + elt(2, 'aa', CAST(16.0 AS DOUBLE)), + elt(0, 'abc', 'defg'), + elt(3, 'abc', 'defg') +FROM alltypesorc LIMIT 1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT elt(2, 'abc', 'defg'), + elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'), + elt('1', 'abc', 'defg'), + elt(2, 'aa', CAST('2' AS TINYINT)), + elt(2, 'aa', CAST('12345' AS SMALLINT)), + elt(2, 'aa', CAST('123456789012' AS BIGINT)), + elt(2, 'aa', CAST(1.25 AS FLOAT)), + elt(2, 'aa', CAST(16.0 AS DOUBLE)), + elt(0, 'abc', 'defg'), + elt(3, 'abc', 'defg') +FROM alltypesorc LIMIT 1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + TableScan + alias: alltypesorc + Select Operator + expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: void), null (type: void) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 + Limit + Number of rows: 1 + ListSink + +PREHOOK: query: SELECT elt(2, 'abc', 'defg'), + elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'), + elt('1', 'abc', 'defg'), + elt(2, 'aa', CAST('2' AS TINYINT)), + elt(2, 'aa', CAST('12345' AS SMALLINT)), + elt(2, 'aa', CAST('123456789012' AS BIGINT)), + elt(2, 'aa', CAST(1.25 AS FLOAT)), + elt(2, 'aa', CAST(16.0 AS DOUBLE)), + elt(0, 'abc', 'defg'), + elt(3, 'abc', 'defg') +FROM alltypesorc LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT elt(2, 'abc', 'defg'), + elt(3, 'aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg'), + elt('1', 'abc', 'defg'), + elt(2, 'aa', CAST('2' AS TINYINT)), + elt(2, 'aa', CAST('12345' AS SMALLINT)), + elt(2, 'aa', CAST('123456789012' AS BIGINT)), + elt(2, 'aa', CAST(1.25 AS FLOAT)), + elt(2, 'aa', CAST(16.0 AS DOUBLE)), + elt(0, 'abc', 'defg'), + elt(3, 'abc', 'defg') +FROM alltypesorc LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +defg cc abc 2 12345 123456789012 1.25 16.0 NULL NULL diff --git a/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out new file mode 100644 index 0000000..491c6f7 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vector_groupby_3.q.out @@ -0,0 +1,1869 @@ +PREHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo 
boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2korc +PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2k +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2k +POSTHOOK: Output: default@vectortab2korc +POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: 
Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +PREHOOK: query: explain +select s, t, max(b) from vectortab2korc group by s, t +PREHOOK: type: QUERY +POSTHOOK: query: explain +select s, t, max(b) from vectortab2korc group by s, t +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: vectortab2korc + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: s (type: string), t (type: tinyint), b (type: bigint) + outputColumnNames: s, t, b + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(b) + keys: s (type: string), t (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: tinyint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint) + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: tinyint), _col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select s, t, max(b) from vectortab2korc group by s, t +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### +POSTHOOK: query: select s, t, max(b) from vectortab2korc group by s, t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### + NULL 2714 + -116 8945302550165004288 + -114 1312 + -113 7569249672628789248 + -108 8939431770838810624 + -107 1906 + -104 8268875586442256384 + -91 3965 + -87 7078641038157643776 + -86 2862 + -85 -7139677575412686848 + -84 -7085247548404178944 + -83 8900351886974279680 + -80 -8938849835283677184 + -75 NULL + -72 9149216169284091904 + -66 3197 + -53 -8619303037130301440 + -46 -9080568167841226752 + -38 2933 + -36 -7512297136103800832 + -33 1075 + -29 8371939471056470016 + -28 -7292078334519894016 + -25 2262 + -24 898 + -19 9094945190752903168 + -16 -7673901622181953536 + -14 7983789401706094592 + -12 -7413317118463164416 + -10 8991442360387584000 + -5 3534 + -4 -9014145341570203648 + -2 8759184090543857664 + 0 8163948965373386752 + 4 7062382339142156288 + 5 
-8703026916864802816 + 8 2905 + 12 2553 + 16 8983857919580209152 + 19 2201 + 22 9191943992860327936 + 23 NULL + 26 -7916510129632296960 + 29 8555933456197828608 + 31 2941 + 37 8143462899383345152 + 42 NULL + 46 3174 + 48 -8566856504746352640 + 54 -7242345057866285056 + 57 8230371298967609344 + 59 2494 + 60 -6986178228432322560 + 62 779 + 64 7368920486374989824 + 79 8736061027343859712 + 84 NULL + 87 797 + 88 8782900615468302336 + 90 2977 + 91 2193 + 96 -7866079955473989632 + 97 2017 + 102 -7036607470351654912 + 103 780 + 105 -7511952204985049088 + 109 1436 + 112 -7063777488249085952 + 113 8775009214012456960 + 114 1145 + 123 3625 +american history NULL 3555 +american history -127 1719 +american history -119 19 +american history -116 3812 +american history -112 7731443941834678272 +american history -110 -8989473881707921408 +american history -108 7359004378440146944 +american history -105 7870277756614623232 +american history -103 -6962271229404348416 +american history -101 7348598907182800896 +american history -95 NULL +american history -94 9119046173224370176 +american history -92 -7612455481940246528 +american history -90 1321 +american history -89 -7571293705217687552 +american history -87 -7536330682873937920 +american history -86 -8731068123910987776 +american history -81 2700 +american history -80 485 +american history -73 2487 +american history -66 -8368487814665895936 +american history -60 1566 +american history -59 3949 +american history -55 8407869317250220032 +american history -53 1955 +american history -50 7857878068300898304 +american history -48 -8016589197379289088 +american history -47 -8566940231897874432 +american history -34 4018 +american history -30 -8789178184387641344 +american history -28 -7720966287634112512 +american history -27 NULL +american history -26 -7057750467944931328 +american history -19 78 +american history -18 -8659643752269242368 +american history -17 2968 +american history -15 9048002942653710336 +american history -11 8714829359200747520 +american history -9 -8503342882470019072 +american history 0 6928080429732536320 +american history 5 135 +american history 20 -7037375807670501376 +american history 22 8467976965865799680 +american history 29 -8243487285852766208 +american history 31 -8007017894942638080 +american history 33 9043089884440068096 +american history 38 7061809776248545280 +american history 39 7922443154272395264 +american history 40 2886 +american history 42 8190967051000659968 +american history 46 8374321007870836736 +american history 48 6962726713896484864 +american history 54 -7784419454650843136 +american history 56 1890 +american history 60 3512 +american history 68 -8067243114610532352 +american history 69 -7199983995864711168 +american history 71 8698055291501543424 +american history 72 8551446856960942080 +american history 74 9038087402564657152 +american history 77 1837 +american history 79 -7542857121910046720 +american history 82 3980 +american history 84 2105 +american history 89 8806507556248731648 +american history 96 823 +american history 100 8760285623204290560 +american history 103 -7868306678534193152 +american history 111 522 +american history 119 8571268359622172672 +american history 122 7497276415392407552 +american history 123 -7488345684795342848 +biology NULL 9116137265342169088 +biology -127 1270 +biology -121 8109381965028548608 +biology -120 8388363436324085760 +biology -119 7054938591408996352 +biology -104 -8683802826440105984 +biology -101 -7198372044947275776 +biology -98 -8940944155843461120 +biology -97 
-7827420207675105280 +biology -96 NULL +biology -92 1142 +biology -90 7775034125776363520 +biology -86 3755 +biology -85 -6935548339131138048 +biology -83 7232273749940838400 +biology -82 -7040248820505149440 +biology -81 NULL +biology -79 3566 +biology -72 -7915999634274369536 +biology -67 8532016240026279936 +biology -66 -7623047151287754752 +biology -59 7534042483076857856 +biology -57 7049773031131283456 +biology -54 -7319315187617587200 +biology -53 -6962292590214234112 +biology -49 NULL +biology -46 2241 +biology -44 2680 +biology -42 2469 +biology -40 -7469660864676585472 +biology -36 1489 +biology -35 2285 +biology -31 -9032650742739836928 +biology -20 7130159794259353600 +biology -19 -7442593976514420736 +biology -16 3960 +biology -13 2463 +biology -7 -8665218198816497664 +biology -6 888 +biology -5 808 +biology 0 523 +biology 2 -8387347109404286976 +biology 3 -9075486079396069376 +biology 4 482 +biology 5 7376467688511455232 +biology 6 -7797149520019062784 +biology 7 454 +biology 8 950 +biology 12 2492 +biology 18 -7255010240787030016 +biology 27 -9049720998034137088 +biology 30 835 +biology 34 2517 +biology 35 8017403886247927808 +biology 36 2539 +biology 41 7052226236896256000 +biology 47 3974 +biology 51 3728 +biology 52 9084402694981533696 +biology 54 -7330203470474985472 +biology 55 1785 +biology 57 9083704659251798016 +biology 64 2400 +biology 83 -8172827216441573376 +biology 86 3199 +biology 89 7747874976739016704 +biology 90 -7598782894648565760 +biology 91 -8131997716860526592 +biology 100 -8782213262837530624 +biology 111 6933731240564056064 +biology 113 664 +biology 114 1343 +biology 115 -7017212700635545600 +biology 117 120 +biology 120 3443 +biology 124 -6919476845891313664 +chemistry NULL 8854677881758162944 +chemistry -127 -7637755520917741568 +chemistry -125 3418 +chemistry -122 -8232763638546694144 +chemistry -117 8014986215157530624 +chemistry -115 3307 +chemistry -110 3430 +chemistry -109 -7624057992767782912 +chemistry -108 7255302164215013376 +chemistry -103 -8099313480512716800 +chemistry -101 2599 +chemistry -98 9005866015985713152 +chemistry -95 -7470307155642245120 +chemistry -91 9030480306789818368 +chemistry -90 1291 +chemistry -89 2968 +chemistry -81 -8664806103426252800 +chemistry -75 375 +chemistry -68 -8813211231120031744 +chemistry -61 -8021859935185928192 +chemistry -59 -7572262898020278272 +chemistry -57 -7362189611124563968 +chemistry -56 -7663293054873812992 +chemistry -52 4056 +chemistry -46 -7610137349734883328 +chemistry -44 7596563216912211968 +chemistry -40 4054 +chemistry -37 NULL +chemistry -33 7545689659010949120 +chemistry -31 -9080956291212132352 +chemistry -30 2001 +chemistry -26 -9178166810751909888 +chemistry -23 -8518060755719585792 +chemistry -8 8487573502287478784 +chemistry -3 3245 +chemistry -2 -7500200359698907136 +chemistry 3 2933 +chemistry 4 2138 +chemistry 8 873 +chemistry 14 7198687580227043328 +chemistry 16 7229607057201127424 +chemistry 19 NULL +chemistry 25 236 +chemistry 27 1826 +chemistry 32 1775 +chemistry 37 41 +chemistry 39 2663 +chemistry 41 8693036785094565888 +chemistry 46 1280 +chemistry 51 392 +chemistry 55 -8843859708698583040 +chemistry 64 9091085792947666944 +chemistry 74 837 +chemistry 75 3860 +chemistry 78 2476 +chemistry 79 8644602243484803072 +chemistry 83 1286 +chemistry 88 -8082793390939193344 +chemistry 90 7794244032613703680 +chemistry 91 3866 +chemistry 96 -7395343938785738752 +chemistry 102 -7907355742053883904 +chemistry 104 3609 +chemistry 110 NULL +chemistry 113 7149417430082027520 +chemistry 
116 7768984605670604800 +chemistry 124 1914 +chemistry 126 9083076230151864320 +debate NULL 1777 +debate -127 8337549596011102208 +debate -117 -8269917980278980608 +debate -114 2085 +debate -108 2816 +debate -106 1948 +debate -98 8011602724663336960 +debate -96 8688483860094599168 +debate -95 1368 +debate -88 8345435427356090368 +debate -87 -8028910243475038208 +debate -86 1095 +debate -85 2341 +debate -82 NULL +debate -72 7720187583697502208 +debate -70 8928133990107881472 +debate -69 7948803266578161664 +debate -67 1521 +debate -57 -8581979259158929408 +debate -52 7947544013461512192 +debate -50 7061498706968428544 +debate -47 49 +debate -46 1234 +debate -41 -8856821118526734336 +debate -35 7062605127422894080 +debate -32 -7831320202242228224 +debate -29 -7476082621253402624 +debate -28 -8379109122834997248 +debate -27 3835 +debate -26 471 +debate -24 8554899472487596032 +debate -15 34 +debate -12 7682327310082531328 +debate -3 8519937082746634240 +debate 5 -8649711322250362880 +debate 17 8560526613401714688 +debate 19 8501910015960735744 +debate 20 -8832750849949892608 +debate 26 738 +debate 34 3060 +debate 36 7792036342592348160 +debate 41 7039820685967343616 +debate 42 8557218322962644992 +debate 52 -7637494527844343808 +debate 54 7534145866886782976 +debate 56 -7161165959057334272 +debate 63 1243 +debate 67 2692 +debate 68 8045070943673671680 +debate 69 -7822452149325094912 +debate 70 7534549597202194432 +debate 73 8372408423196270592 +debate 75 2463 +debate 77 1168 +debate 85 8822384228057604096 +debate 87 8146288732715196416 +debate 89 2089 +debate 90 9078604269481148416 +debate 93 455 +debate 94 -7506254246954500096 +debate 113 1995 +debate 124 3691 +education NULL 2903 +education -127 -8302817097848307712 +education -125 2214 +education -113 -8142667274351345664 +education -111 7296164580491075584 +education -109 8150115791664340992 +education -105 1066 +education -101 7165364563962191872 +education -98 1341 +education -96 -8022573309127000064 +education -93 7343171468838567936 +education -92 2811 +education -89 1999 +education -87 345 +education -86 8473699639908261888 +education -77 9148071980848742400 +education -72 3021 +education -69 2805 +education -64 6974475559697768448 +education -61 7917494645725765632 +education -55 7989119273552158720 +education -52 9050032047355125760 +education -44 -8219876839318716416 +education -43 3911 +education -38 3749 +education -37 923 +education -36 694 +education -34 -7707242953271500800 +education -28 7735566678126616576 +education -24 1509 +education -15 -6960947572095770624 +education -13 695 +education -9 928 +education 4 -8034414142083170304 +education 12 8454154705460666368 +education 17 2232 +education 27 -8454143651040444416 +education 33 -8488247955875618816 +education 34 379 +education 35 7473537548003352576 +education 38 -8244116388227104768 +education 41 7584007864107778048 +education 45 238 +education 46 7892281003266408448 +education 49 8461498293348065280 +education 51 -7623359796281999360 +education 53 -7600138468036386816 +education 59 7432998950057975808 +education 61 NULL +education 66 7652123583449161728 +education 73 -7185369278665605120 +education 75 9054887854393950208 +education 77 3206 +education 81 -6970396058557005824 +education 82 1058 +education 85 7961909238130270208 +education 89 3409 +education 95 3764 +education 102 7375521127126089728 +education 103 7384150968511315968 +education 110 7452756603516190720 +education 111 3824 +education 113 8994608999945125888 +education 114 1846 +education 116 
-8930307926221807616 +education 119 7548958830580563968 +education 125 8577096957495025664 +forestry NULL 9123116008004288512 +forestry -126 9086905513121890304 +forestry -118 8272001752345690112 +forestry -116 7997694023324975104 +forestry -109 275 +forestry -105 2724 +forestry -98 3089 +forestry -95 2485 +forestry -94 -8051871680800120832 +forestry -83 2752 +forestry -81 -8922409715403112448 +forestry -80 1165 +forestry -79 1937 +forestry -77 1247 +forestry -74 1643 +forestry -73 -7228589258642194432 +forestry -68 -6988970700649168896 +forestry -58 7989160253372817408 +forestry -51 -9002912355472736256 +forestry -50 2283 +forestry -49 417 +forestry -42 268 +forestry -34 2524 +forestry -23 7790728456522784768 +forestry -12 -8205148279289085952 +forestry -11 9194388393453060096 +forestry -10 3962 +forestry -8 7955126053367119872 +forestry -3 1165 +forestry -1 -9071565764086521856 +forestry 0 -7603467428164009984 +forestry 7 3118 +forestry 8 7299197687217856512 +forestry 11 3467 +forestry 12 8160662610166194176 +forestry 19 -8760655406971863040 +forestry 29 -7831595638727565312 +forestry 31 3663 +forestry 35 1371 +forestry 37 950 +forestry 39 6933451028794925056 +forestry 40 -8763062627136864256 +forestry 44 -8559252110266564608 +forestry 45 8553195689344991232 +forestry 50 615 +forestry 51 2580 +forestry 52 8514851182589771776 +forestry 55 -9091113592821972992 +forestry 56 7418271723644403712 +forestry 71 8779711700787298304 +forestry 78 -7845896959112658944 +forestry 79 -7818454479651135488 +forestry 87 1981 +forestry 90 8829545979081744384 +forestry 93 1422 +forestry 94 -8469607298426437632 +forestry 97 -8845239510002753536 +forestry 102 255 +forestry 106 8570983266408103936 +forestry 107 999 +forestry 111 -7138415011665043456 +forestry 115 8573305425181941760 +forestry 117 -7120456708338688000 +forestry 120 7784169796350730240 +geology NULL 8171188598958407680 +geology -124 3190 +geology -117 3421 +geology -115 -8660149447361404928 +geology -112 1462 +geology -108 -8604758220106014720 +geology -106 -7661250850555633664 +geology -101 -7078068944081002496 +geology -100 3002 +geology -96 -6957946688477274112 +geology -94 268 +geology -84 -7511202710200885248 +geology -82 8817665768680906752 +geology -80 3446 +geology -79 1614 +geology -75 -7883252982752665600 +geology -72 8391785334471589888 +geology -68 -8203008052020879360 +geology -59 7621013099259527168 +geology -57 -7594824008626372608 +geology -54 -7328087811698909184 +geology -53 -6988811476286873600 +geology -50 8120593157178228736 +geology -48 1342 +geology -35 -7510418793070075904 +geology -34 8708845895460577280 +geology -32 7487538600082554880 +geology -31 871 +geology -30 -8127494999848919040 +geology -23 1537 +geology -21 91 +geology -18 7370803940448305152 +geology -16 8979012655944220672 +geology 1 -8870673219965001728 +geology 4 9020143715350814720 +geology 5 7784489776013295616 +geology 6 3203 +geology 9 -8379964450833367040 +geology 14 -8136227554401107968 +geology 18 3763 +geology 21 8850055384477401088 +geology 23 NULL +geology 26 -7145585429014888448 +geology 28 2183 +geology 31 1892 +geology 33 7689489436826804224 +geology 37 2762 +geology 38 -8395998375405912064 +geology 49 8854715632851345408 +geology 58 -8856151919723003904 +geology 72 2179 +geology 82 6967631925774639104 +geology 84 8316336224427483136 +geology 86 -9101953184875757568 +geology 89 1346 +geology 92 -7833618000492109824 +geology 93 NULL +geology 95 -7838598833900584960 +geology 100 7528074274555305984 +geology 101 7701723309715685376 +geology 102 
-8297230235506343936 +geology 116 3073 +geology 121 213 +geology 124 7779486624537370624 +geology 127 -7703540456272994304 +history NULL 3231 +history -125 1856 +history -124 -8318886086186213376 +history -105 3701 +history -104 1769 +history -103 -7623405558242500608 +history -101 154 +history -98 2046 +history -91 1651 +history -90 65 +history -89 3418 +history -79 1796 +history -75 -8783777723063099392 +history -67 -7679894005808693248 +history -61 8325227661920133120 +history -58 8376440110255243264 +history -54 2393 +history -42 8987827141270880256 +history -37 3946 +history -34 -7192529627893858304 +history -26 NULL +history -24 3079 +history -19 1791 +history -18 3770 +history -15 2013 +history -12 154 +history -8 2187 +history -5 1667 +history -1 -8544299740525461504 +history 1 8637720762289659904 +history 2 7648729477297987584 +history 7 7647481735646363648 +history 14 6947488599548215296 +history 18 -7778829032042790912 +history 25 3866 +history 50 3874 +history 51 8135164922674872320 +history 52 -6921654334727036928 +history 57 3690 +history 58 3664 +history 59 7514552840617558016 +history 61 -8411282676082565120 +history 62 3024 +history 71 -8730803262481580032 +history 72 7099005292698550272 +history 73 2776 +history 75 -7547245548870025216 +history 78 -9102482277760983040 +history 84 -7162299524557471744 +history 92 921 +history 95 3769 +history 98 3588 +history 100 263 +history 108 -7800879252150779904 +history 112 -7419068456205385728 +history 113 3728 +history 114 289 +history 126 2810 +history 127 -7884460946615984128 +industrial engineering NULL 3060 +industrial engineering -124 3249 +industrial engineering -110 2560 +industrial engineering -101 -7617860842651017216 +industrial engineering -98 8368012468775608320 +industrial engineering -96 688 +industrial engineering -87 7486884806277611520 +industrial engineering -85 -7512289590991544320 +industrial engineering -72 3208 +industrial engineering -68 -9206329156028112896 +industrial engineering -65 9112400579327483904 +industrial engineering -58 650 +industrial engineering -57 -8521578237232529408 +industrial engineering -53 68 +industrial engineering -49 181 +industrial engineering -47 2911 +industrial engineering -43 504 +industrial engineering -38 -8581765103969312768 +industrial engineering -35 1726 +industrial engineering -31 1520 +industrial engineering -30 3472 +industrial engineering -28 724 +industrial engineering -22 -7540104552219860992 +industrial engineering -11 -7802538500225777664 +industrial engineering -7 2420 +industrial engineering -6 9185458640237641728 +industrial engineering -5 2485 +industrial engineering -1 8087737899452432384 +industrial engineering 3 3682 +industrial engineering 7 7573530789362262016 +industrial engineering 27 -8445801063348281344 +industrial engineering 29 3365 +industrial engineering 32 364 +industrial engineering 33 -7240213957902663680 +industrial engineering 36 1158 +industrial engineering 42 2506 +industrial engineering 43 3725 +industrial engineering 48 355 +industrial engineering 54 691 +industrial engineering 58 8195103847607967744 +industrial engineering 59 8808467247666241536 +industrial engineering 70 9091082386452684800 +industrial engineering 73 -7011425384222244864 +industrial engineering 78 -9136398397785948160 +industrial engineering 79 -8948335470186373120 +industrial engineering 83 NULL +industrial engineering 95 8192304692696383488 +industrial engineering 96 -7910019233726242816 +industrial engineering 98 -7524170566881329152 +industrial engineering 99 
-9215144824304721920 +industrial engineering 102 2900 +industrial engineering 104 -8875546987176206336 +industrial engineering 105 8071961599867387904 +industrial engineering 106 878 +industrial engineering 113 1862 +industrial engineering 126 -6968892545529896960 +joggying NULL -8877053610728161280 +joggying -125 7823874904139849728 +joggying -121 3103 +joggying -119 2189 +joggying -110 -8870186814744420352 +joggying -101 -8675661101615489024 +joggying -100 7080269176324218880 +joggying -80 8254763178969915392 +joggying -79 -7939634346485858304 +joggying -77 7626715182847090688 +joggying -76 NULL +joggying -73 8011181697250631680 +joggying -69 7678790769408172032 +joggying -64 2373 +joggying -62 -8425998949410889728 +joggying -61 8687042963221159936 +joggying -57 8936639033158410240 +joggying -55 8208354137450766336 +joggying -48 8351163199364390912 +joggying -47 NULL +joggying -43 8323460620425330688 +joggying -40 3781 +joggying -30 2217 +joggying -27 2790 +joggying -24 -7083646746411720704 +joggying -15 -8658387566611996672 +joggying -14 -8358130693961195520 +joggying -8 8723248113030782976 +joggying -1 7844258063629852672 +joggying 13 8525894870444638208 +joggying 20 425 +joggying 25 1556 +joggying 26 2325 +joggying 27 1290 +joggying 28 -8858063395050110976 +joggying 37 -7378096180613840896 +joggying 43 7260908278294560768 +joggying 46 8905330479248064512 +joggying 48 1337 +joggying 49 -8047774491688255488 +joggying 52 2803 +joggying 57 8183233196086214656 +joggying 61 3253 +joggying 62 -8359839265974165504 +joggying 69 8302473563519950848 +joggying 70 1965 +joggying 72 976 +joggying 74 -7751265769984491520 +joggying 80 7454442625055145984 +joggying 85 7748799008146366464 +joggying 87 94 +joggying 92 7818464507324121088 +joggying 93 8416121695917498368 +joggying 94 7599019810193211392 +joggying 97 2565 +joggying 99 1863 +joggying 104 1864 +joggying 105 2002 +joggying 118 -8108693586698706944 +joggying 119 -7892780594910871552 +joggying 121 1987 +joggying 123 NULL +joggying 125 2842 +kindergarten NULL 6933001829416034304 +kindergarten -126 2509 +kindergarten -113 259 +kindergarten -106 -8103788088118018048 +kindergarten -98 982 +kindergarten -95 8983912573761167360 +kindergarten -92 NULL +kindergarten -79 -7751427073017544704 +kindergarten -78 7524958388842078208 +kindergarten -75 7017956982081404928 +kindergarten -74 -8632237187473088512 +kindergarten -69 1813 +kindergarten -60 7027529814236192768 +kindergarten -59 8991071342495531008 +kindergarten -57 -7949309059286163456 +kindergarten -54 8896237972875370496 +kindergarten -42 -7094827141662539776 +kindergarten -40 7084659344078970880 +kindergarten -26 7226360892091416576 +kindergarten -18 7696737688942567424 +kindergarten -8 -7420448501073051648 +kindergarten 10 3111 +kindergarten 16 7753882935005880320 +kindergarten 18 -7395553021620731904 +kindergarten 19 -8104684579106914304 +kindergarten 23 8871707618793996288 +kindergarten 29 3248 +kindergarten 37 3493 +kindergarten 46 958 +kindergarten 48 -8572949572756774912 +kindergarten 51 8543177193114779648 +kindergarten 52 8868529429494071296 +kindergarten 55 -7404057145074712576 +kindergarten 61 7710447533880614912 +kindergarten 66 2735 +kindergarten 69 73 +kindergarten 82 530 +kindergarten 84 7998357471114969088 +kindergarten 85 7926898770090491904 +kindergarten 86 NULL +kindergarten 90 8972161729142095872 +kindergarten 92 8716401555586727936 +kindergarten 96 -7429331808102899712 +kindergarten 100 108 +kindergarten 101 7166263463731421184 +kindergarten 109 2962 +kindergarten 111 2320 
+kindergarten 116 9207927479837319168 +kindergarten 118 -7819437864839495680 +kindergarten 120 7779735136559579136 +kindergarten 122 -7079898537463537664 +kindergarten 127 2223 +linguistics NULL 8383159090746204160 +linguistics -127 -8896045754034978816 +linguistics -122 -7695491171376291840 +linguistics -113 7614435638888210432 +linguistics -101 -8017791189288869888 +linguistics -90 -7739424919198187520 +linguistics -89 8489735221193138176 +linguistics -87 2244 +linguistics -86 NULL +linguistics -78 8518454006987948032 +linguistics -77 7686992843032010752 +linguistics -73 -8916987977485312000 +linguistics -70 2277 +linguistics -69 -7104310188119834624 +linguistics -68 8184799300477943808 +linguistics -67 NULL +linguistics -53 NULL +linguistics -52 -8651641150831362048 +linguistics -41 1811 +linguistics -34 3958 +linguistics -28 7345991518378442752 +linguistics -22 8489584373231919104 +linguistics -20 7620183559667081216 +linguistics -17 9075404705968840704 +linguistics -16 2662 +linguistics -14 -9203804401302323200 +linguistics -13 7566273236152721408 +linguistics -12 NULL +linguistics -6 8145750910080745472 +linguistics -4 3789 +linguistics -2 -7501803640821456896 +linguistics 0 9023663198045544448 +linguistics 1 1386 +linguistics 3 7386087924003676160 +linguistics 6 8558000156325707776 +linguistics 7 9048297564833079296 +linguistics 10 2846 +linguistics 11 83 +linguistics 18 1261 +linguistics 33 1086 +linguistics 37 1777 +linguistics 42 9117063974299148288 +linguistics 44 9136548192574529536 +linguistics 50 9188173682239275008 +linguistics 53 1447 +linguistics 64 1704 +linguistics 67 -7201085131997011968 +linguistics 72 204 +linguistics 73 2502 +linguistics 83 NULL +linguistics 89 8116738401948377088 +linguistics 93 -7879864376629567488 +linguistics 96 803 +linguistics 98 7898670840507031552 +linguistics 100 3622 +linguistics 113 7217123582035116032 +linguistics 115 -6920172215209426944 +linguistics 123 7762823913046556672 +linguistics 125 1074 +linguistics 126 9067985867711291392 +mathematics NULL 9001907486943993856 +mathematics -127 -7158472098920390656 +mathematics -124 8290014929764040704 +mathematics -122 -7453525026342617088 +mathematics -120 3322 +mathematics -118 -6997233584896229376 +mathematics -117 2786 +mathematics -103 658 +mathematics -101 -8756989568739835904 +mathematics -100 7662037650719850496 +mathematics -99 NULL +mathematics -98 -7425160895830573056 +mathematics -95 490 +mathematics -91 8223732800007864320 +mathematics -88 -7115054815375073280 +mathematics -81 7753359568986636288 +mathematics -79 8111757081791733760 +mathematics -77 7581614118458335232 +mathematics -75 -7221474017515347968 +mathematics -66 -7894382303337832448 +mathematics -57 8219326436390821888 +mathematics -52 8435912708683087872 +mathematics -50 7746402369011277824 +mathematics -49 8156018594610790400 +mathematics -46 8210813831744118784 +mathematics -45 7237310132329488384 +mathematics -40 -8518258741831680000 +mathematics -33 7461153404961128448 +mathematics -31 681 +mathematics -21 -7661192563533062144 +mathematics -19 3159 +mathematics -7 8396433451610652672 +mathematics 0 8282648443538710528 +mathematics 3 -8887058200926093312 +mathematics 6 1701 +mathematics 10 7259955893466931200 +mathematics 22 6934570741217755136 +mathematics 23 7271887863395459072 +mathematics 25 -7333362172439035904 +mathematics 32 2073 +mathematics 35 -7558524160894427136 +mathematics 38 -7557017910095650816 +mathematics 39 2579 +mathematics 46 -7759425383684849664 +mathematics 48 1366 +mathematics 50 3029 
+mathematics 53 7549858023389003776 +mathematics 55 2227 +mathematics 56 898 +mathematics 58 3830 +mathematics 59 7487338208419823616 +mathematics 62 883 +mathematics 63 8156782979767238656 +mathematics 65 1648 +mathematics 76 8927691194719174656 +mathematics 79 7871189141676998656 +mathematics 80 NULL +mathematics 82 1093 +mathematics 87 3707 +mathematics 92 -8754992450211692544 +mathematics 98 2398 +mathematics 102 690 +mathematics 107 8391407951622815744 +mathematics 111 2607 +mathematics 114 3094 +nap time NULL -8430283518005846016 +nap time -122 8660248367767076864 +nap time -119 2715 +nap time -115 -7576194692683563008 +nap time -113 8451612303224520704 +nap time -104 -7709958788604936192 +nap time -102 -6938706403992854528 +nap time -101 2229 +nap time -91 85 +nap time -85 3932 +nap time -71 9136234417125007360 +nap time -61 1524 +nap time -54 -8562524688907485184 +nap time -49 8079573715140485120 +nap time -45 7310869618402910208 +nap time -41 7801697837312884736 +nap time -31 939 +nap time -15 7401968422230032384 +nap time -6 7843804446688264192 +nap time -4 736 +nap time -2 8697823501349609472 +nap time 0 1940 +nap time 3 9064847977742032896 +nap time 6 7871554728617025536 +nap time 16 NULL +nap time 23 7432428551399669760 +nap time 27 1454 +nap time 31 6964585306125008896 +nap time 35 2689 +nap time 42 8652485812846567424 +nap time 51 -7881262505761710080 +nap time 52 2463 +nap time 56 -8240034910581153792 +nap time 62 NULL +nap time 69 NULL +nap time 70 66 +nap time 73 7782245855193874432 +nap time 76 -8543982423727128576 +nap time 80 2647 +nap time 89 -7888051992910274560 +nap time 90 7761834341179375616 +nap time 92 8540237852367446016 +nap time 93 -8203075743525806080 +nap time 98 7691062622443044864 +nap time 103 361 +nap time 104 7125231541858205696 +nap time 105 7045967493826387968 +nap time 107 9114850402293882880 +nap time 108 1189 +nap time 118 8910706980937261056 +opthamology NULL 8856674723376668672 +opthamology -122 9096395849845194752 +opthamology -121 3879 +opthamology -118 2072 +opthamology -111 8244041599171862528 +opthamology -99 -7875953567586451456 +opthamology -97 8752150411997356032 +opthamology -91 2255 +opthamology -86 -9066993118333706240 +opthamology -82 1606 +opthamology -79 -9117959922369060864 +opthamology -78 -7303847963918393344 +opthamology -77 8631515095562887168 +opthamology -75 8779073705407963136 +opthamology -73 908 +opthamology -63 470 +opthamology -59 3021 +opthamology -55 2177 +opthamology -49 8854495099223375872 +opthamology -48 -7877598807023386624 +opthamology -44 2675 +opthamology -35 2274 +opthamology -33 1613 +opthamology -30 8048726769133592576 +opthamology -28 8849475396952514560 +opthamology -27 3599 +opthamology -24 383 +opthamology -22 7718825401976684544 +opthamology -18 2393 +opthamology -17 -7629401308029976576 +opthamology -14 3235 +opthamology -8 7333512171174223872 +opthamology -5 7411793502161182720 +opthamology 4 9131533983989358592 +opthamology 5 130 +opthamology 21 7069729473166090240 +opthamology 39 2745 +opthamology 41 NULL +opthamology 48 3159 +opthamology 49 -7904188195431661568 +opthamology 52 -7081500255163727872 +opthamology 53 -7055760785575665664 +opthamology 55 2335 +opthamology 68 -8866442231663067136 +opthamology 69 2144 +opthamology 74 -7708932208121225216 +opthamology 76 -7647020450676146176 +opthamology 77 -6934304742087655424 +opthamology 79 1508 +opthamology 81 8920344895701393408 +opthamology 84 927 +opthamology 87 6924820982050758656 +opthamology 88 -8593419958317056000 +opthamology 89 
-7978782649203228672 +opthamology 92 NULL +opthamology 96 -8418913260807217152 +opthamology 97 8935252708196999168 +opthamology 100 -7532751268425261056 +opthamology 104 1866 +opthamology 117 2835 +opthamology 120 -8340523561480437760 +opthamology 122 3462 +opthamology 125 965 +opthamology 127 412 +philosophy NULL 8759089349412847616 +philosophy -125 9199741683232399360 +philosophy -121 342 +philosophy -119 -8507279516485566464 +philosophy -115 1719 +philosophy -110 7471208109437304832 +philosophy -105 -7172594404186693632 +philosophy -103 8144552446127972352 +philosophy -100 -7603569103205916672 +philosophy -99 -8030058711611629568 +philosophy -95 3460 +philosophy -93 8720504651219001344 +philosophy -92 8649296591032172544 +philosophy -80 -7035132060308643840 +philosophy -78 1752 +philosophy -77 -7344947507044466688 +philosophy -69 2897 +philosophy -68 9185952983951343616 +philosophy -61 7271786885641666560 +philosophy -56 -7593363318079610880 +philosophy -55 3366 +philosophy -53 6987889924212203520 +philosophy -52 2824 +philosophy -51 2180 +philosophy -50 8875745082589929472 +philosophy -40 3478 +philosophy -39 1141 +philosophy -27 -8710298418608619520 +philosophy -26 -7344146703223496704 +philosophy -25 -7878145001776152576 +philosophy -17 -7515996202498473984 +philosophy -11 -7953426740065312768 +philosophy 8 -8088337436168830976 +philosophy 20 8290944180915871744 +philosophy 21 -7903158849011843072 +philosophy 22 7892026679115554816 +philosophy 29 2848 +philosophy 31 7659279803863146496 +philosophy 34 -7058986555327307776 +philosophy 38 7238339720750948352 +philosophy 41 8792059919353348096 +philosophy 43 3555 +philosophy 45 8362046808797306880 +philosophy 48 6991316084916879360 +philosophy 64 535 +philosophy 67 2715 +philosophy 68 1693 +philosophy 73 8283099811330506752 +philosophy 83 281 +philosophy 96 8682955459667951616 +philosophy 98 8613562211893919744 +philosophy 104 3541 +philosophy 108 6969599299897163776 +philosophy 117 2855 +philosophy 118 -7356685674003021824 +philosophy 120 -8300526097982226432 +philosophy 123 2140 +quiet hour NULL 7874764415950176256 +quiet hour -127 1099 +quiet hour -123 8769199243315814400 +quiet hour -121 7031339012080549888 +quiet hour -119 7608447395949109248 +quiet hour -114 8419958579638157312 +quiet hour -111 8424515140664360960 +quiet hour -105 918 +quiet hour -104 -7037638331316469760 +quiet hour -88 2919 +quiet hour -87 9182828596851990528 +quiet hour -76 -7792903881635938304 +quiet hour -73 8793387410919038976 +quiet hour -68 6982145326341423104 +quiet hour -66 383 +quiet hour -65 NULL +quiet hour -56 3567 +quiet hour -55 8569030475428511744 +quiet hour -52 8201303040648052736 +quiet hour -50 7998687089080467456 +quiet hour -48 8398862954249560064 +quiet hour -45 7378993334503694336 +quiet hour -42 NULL +quiet hour -41 7231399302953377792 +quiet hour -33 7637152193832886272 +quiet hour -31 -7744462446680375296 +quiet hour -25 NULL +quiet hour -14 997 +quiet hour -8 -7329767178250018816 +quiet hour -1 9085434340468473856 +quiet hour 0 7450416810848313344 +quiet hour 6 8795069490394882048 +quiet hour 7 2131 +quiet hour 8 -7265998318110711808 +quiet hour 13 2560 +quiet hour 21 -8293833565967810560 +quiet hour 23 1880 +quiet hour 29 2323 +quiet hour 30 9062227900376203264 +quiet hour 33 7528211148397944832 +quiet hour 35 -7046180371529351168 +quiet hour 38 2725 +quiet hour 43 8069531888205086720 +quiet hour 58 2461 +quiet hour 60 NULL +quiet hour 66 3770 +quiet hour 71 500 +quiet hour 74 -7902517224300036096 +quiet hour 80 7006803044329021440 
+quiet hour 82 8853989376829833216 +quiet hour 84 8920533610804609024 +quiet hour 93 -7873753603299540992 +quiet hour 98 -9008631121684832256 +quiet hour 110 2186 +quiet hour 112 7436133434239229952 +quiet hour 115 1641 +quiet hour 120 919 +quiet hour 121 1506 +quiet hour 123 7800332581637259264 +religion NULL 7295502697317097472 +religion -125 2106 +religion -106 NULL +religion -104 820 +religion -94 9174894805640142848 +religion -93 491 +religion -81 8463868417649524736 +religion -78 7497306924248834048 +religion -77 NULL +religion -76 -8959796625322680320 +religion -71 296 +religion -70 -7461750143936897024 +religion -69 -7433265617153343488 +religion -64 -8430370933326536704 +religion -62 913 +religion -60 7700734109530767360 +religion -56 782 +religion -44 -8807361476639629312 +religion -42 -9213132862973829120 +religion -41 7266437490436341760 +religion -38 -8140349174954893312 +religion -35 7295926343524163584 +religion -32 1537 +religion -29 8221561626658881536 +religion -28 -8857335871148171264 +religion -26 1039 +religion -24 2194 +religion -23 3183 +religion -9 -8696162322976997376 +religion -7 -6968771079156654080 +religion -3 203 +religion 0 8996824426131390464 +religion 2 8995562121346260992 +religion 4 2803 +religion 5 -7159700138947862528 +religion 15 4088 +religion 17 1780 +religion 29 -7612466483992051712 +religion 31 1021 +religion 38 1751 +religion 44 -8509547439040757760 +religion 45 -8070535484085895168 +religion 49 8836228556823977984 +religion 52 7250237407877382144 +religion 54 9211455920344088576 +religion 58 3467 +religion 67 8113585123802529792 +religion 70 7919597361814577152 +religion 73 9053187076403060736 +religion 74 815 +religion 76 -7273694358642851840 +religion 78 -8051587217208967168 +religion 82 3119 +religion 92 2067 +religion 93 3848 +religion 96 -8317591428117274624 +religion 97 3456 +religion 102 -8471480409335513088 +religion 103 8815398225009967104 +religion 106 3058 +religion 107 3810 +religion 110 -7849504559236210688 +religion 115 -7712425776235274240 +religion 120 5 +religion 123 979 +religion 124 8899122608190930944 +study skills NULL 8201491077550874624 +study skills -127 -8559008501282832384 +study skills -126 3507 +study skills -117 -8400045653258444800 +study skills -107 8785153741735616512 +study skills -106 8002769767000145920 +study skills -100 -8962547695651323904 +study skills -88 2551 +study skills -86 3990 +study skills -82 NULL +study skills -81 612 +study skills -76 NULL +study skills -73 -8535957064499879936 +study skills -65 8332670681629106176 +study skills -52 3533 +study skills -36 -8485389240529354752 +study skills -33 -7213775605408178176 +study skills -27 961 +study skills -26 -9187662685618348032 +study skills -22 7204802700490858496 +study skills -17 3144 +study skills -14 3913 +study skills -13 7195454019231834112 +study skills -6 -7296096276653391872 +study skills -5 2412 +study skills -4 1094 +study skills -3 707 +study skills -1 7381659098423926784 +study skills 2 -7507578199583694848 +study skills 3 7291432593139507200 +study skills 18 743 +study skills 21 7274777328897802240 +study skills 23 8333523087360901120 +study skills 25 -8704234107608203264 +study skills 28 -8494118409594650624 +study skills 29 -7220731681653604352 +study skills 30 2637 +study skills 35 -7488415863027367936 +study skills 39 8897901899039473664 +study skills 40 3961 +study skills 47 -7049618574399692800 +study skills 49 -7326863346317598720 +study skills 50 7054271419461812224 +study skills 54 7128222874437238784 +study skills 55 1368 
+study skills 58 -7030489936116252672 +study skills 62 8372588378498777088 +study skills 63 2512 +study skills 66 -7497303453253402624 +study skills 68 3725 +study skills 72 -8028275725610909696 +study skills 77 7354813692542304256 +study skills 80 -9078662294976061440 +study skills 83 -7779270198785875968 +study skills 92 3059 +study skills 95 7393308503950548992 +study skills 96 -8046189486447017984 +study skills 101 2295 +study skills 106 -8161047750470279168 +study skills 107 9132009829414584320 +study skills 110 -7998947380180819968 +study skills 115 7344029858387820544 +study skills 119 2264 +study skills 123 -7797151404935618560 +topology NULL 8639254009546055680 +topology -122 3941 +topology -116 -7964801953178091520 +topology -106 NULL +topology -105 -7824788571789279232 +topology -102 8783241818558193664 +topology -98 7212090742612467712 +topology -96 3568 +topology -88 -6975459232300236800 +topology -86 2515 +topology -78 7347732772348870656 +topology -74 8773222500321361920 +topology -71 4037 +topology -60 1493 +topology -58 2619 +topology -57 8895174927321243648 +topology -50 8168742078705262592 +topology -44 -8664374244449050624 +topology -42 2434 +topology -41 1153 +topology -36 3588 +topology -32 -8923529803981905920 +topology -31 -7330413050756235264 +topology -25 244 +topology -21 -8615168537390571520 +topology -5 1439 +topology -1 7391208370547269632 +topology 7 7339426767877390336 +topology 11 3333 +topology 13 8411494452500930560 +topology 14 8367680396909404160 +topology 18 -9189155542884474880 +topology 26 2218 +topology 30 2348 +topology 38 7410096605330227200 +topology 41 2608 +topology 42 8547243497773457408 +topology 47 7705445437881278464 +topology 50 NULL +topology 52 1899 +topology 54 7091300332052062208 +topology 55 -6935038507792801792 +topology 58 -7616522969329262592 +topology 59 7212016545671348224 +topology 61 1914 +topology 63 -8961059046745669632 +topology 67 3680 +topology 69 2358 +topology 71 812 +topology 80 4075 +topology 81 22 +topology 83 1477 +topology 86 294 +topology 87 8900180888218329088 +topology 94 8146492373537660928 +topology 105 462 +topology 107 112 +topology 119 8525336514806317056 +topology 121 7933040277013962752 +topology 127 -8835408234247168000 +undecided NULL 8811693967537774592 +undecided -120 7242751359672631296 +undecided -118 4078 +undecided -117 7086206629592252416 +undecided -116 7013693841855774720 +undecided -115 NULL +undecided -114 8761174805938331648 +undecided -112 367 +undecided -105 4030 +undecided -104 8625937019655200768 +undecided -96 7697541332524376064 +undecided -93 -7777884099756122112 +undecided -90 NULL +undecided -84 -9109392978217484288 +undecided -83 7175638927948562432 +undecided -78 -9157613004431998976 +undecided -69 3907 +undecided -62 -8914039133569400832 +undecided -53 1827 +undecided -52 3071 +undecided -51 481 +undecided -43 7491898395977523200 +undecided -41 7690986322714066944 +undecided -29 1774 +undecided -28 4024 +undecided -23 1371 +undecided -19 -8523434203900674048 +undecided -13 3823 +undecided -10 NULL +undecided -8 8470141334513098752 +undecided 0 7944741547145502720 +undecided 1 1671 +undecided 7 8269730157217062912 +undecided 11 9089435102788009984 +undecided 13 -7700203302632210432 +undecided 14 9190466190353661952 +undecided 27 3622 +undecided 30 1910 +undecided 33 -8465978403747037184 +undecided 37 -7507424948896415744 +undecided 45 -8335810316927213568 +undecided 47 -8503573595507761152 +undecided 50 7570474972934488064 +undecided 51 1545 +undecided 56 8583916402383601664 
+undecided 69 -7140008543769042944 +undecided 76 8169878743136043008 +undecided 95 2821 +undecided 97 -7451660755269853184 +undecided 98 443 +undecided 111 9180098147855769600 +undecided 114 3006 +undecided 119 168 +undecided 123 1187 +undecided 124 7888238729321496576 +values clariffication NULL -7456869587112255488 +values clariffication -123 7412924364686458880 +values clariffication -119 NULL +values clariffication -114 7235109456886816768 +values clariffication -109 7909645665163804672 +values clariffication -108 -7246123871306244096 +values clariffication -107 8295110846998233088 +values clariffication -105 7555301305375858688 +values clariffication -100 1053 +values clariffication -98 -8013397854633648128 +values clariffication -97 8579974641030365184 +values clariffication -95 -8996954350906294272 +values clariffication -92 2011 +values clariffication -90 1423 +values clariffication -81 6996686091335884800 +values clariffication -75 2569 +values clariffication -70 3083 +values clariffication -69 -7329807949048193024 +values clariffication -67 169 +values clariffication -63 -6947955278050181120 +values clariffication -62 2712 +values clariffication -60 2971 +values clariffication -55 3904 +values clariffication -51 3637 +values clariffication -50 8199513544090730496 +values clariffication -48 7370078518278397952 +values clariffication -46 NULL +values clariffication -42 -7611584069753552896 +values clariffication -40 2942 +values clariffication -38 2991 +values clariffication -37 7581052107944361984 +values clariffication -31 763 +values clariffication -28 3352 +values clariffication -8 8148211378319933440 +values clariffication -6 2056 +values clariffication -5 3031 +values clariffication 4 489 +values clariffication 8 -8426531414463545344 +values clariffication 9 -8603817012434198528 +values clariffication 10 7186401810812059648 +values clariffication 12 8190539859890601984 +values clariffication 14 -8147405381260345344 +values clariffication 15 7575087487730196480 +values clariffication 21 NULL +values clariffication 23 3255 +values clariffication 30 9107991000536498176 +values clariffication 32 NULL +values clariffication 42 2020 +values clariffication 50 1983 +values clariffication 53 3887 +values clariffication 56 3608 +values clariffication 57 -7669169138124275712 +values clariffication 62 3910 +values clariffication 70 1287 +values clariffication 74 2533 +values clariffication 80 7220131672176058368 +values clariffication 85 2986 +values clariffication 92 -8490382417169408000 +values clariffication 93 7741854854673367040 +values clariffication 96 2625 +values clariffication 97 3858 +values clariffication 98 8515682078777081856 +values clariffication 108 1115 +values clariffication 118 -9022154842129547264 +values clariffication 120 6927260280037097472 +values clariffication 122 -9084940280061485056 +values clariffication 124 -9210275791460499456 +values clariffication 126 3673 +values clariffication 127 -8347088645602050048 +wind surfing NULL 7961515985722605568 +wind surfing -124 1048 +wind surfing -121 7164349895861829632 +wind surfing -117 1177 +wind surfing -116 1941 +wind surfing -113 4020 +wind surfing -111 8415171956168417280 +wind surfing -104 8666178591503564800 +wind surfing -102 1518 +wind surfing -99 7265141874315517952 +wind surfing -98 8457906374051020800 +wind surfing -96 3147 +wind surfing -83 1509 +wind surfing -80 3630 +wind surfing -78 -9088239683374350336 +wind surfing -71 1990 +wind surfing -65 1495 +wind surfing -60 601 +wind surfing -57 
6926925215281774592 +wind surfing -56 7107604675626008576 +wind surfing -42 2619 +wind surfing -39 3554 +wind surfing -38 7410872053689794560 +wind surfing -34 -7535857766791577600 +wind surfing -31 -9105701280936501248 +wind surfing -30 3945 +wind surfing -21 NULL +wind surfing -18 -8117838333114212352 +wind surfing -14 7130306447560826880 +wind surfing -12 3974 +wind surfing -8 9016280522993975296 +wind surfing -6 -6951350560260784128 +wind surfing -2 661 +wind surfing 5 3286 +wind surfing 11 -9149719074367946752 +wind surfing 15 3940 +wind surfing 16 2193 +wind surfing 20 3703 +wind surfing 21 8004633750273925120 +wind surfing 22 -7055619148037554176 +wind surfing 26 -8746702976270385152 +wind surfing 29 1157 +wind surfing 33 1055 +wind surfing 36 1559 +wind surfing 37 7593521922173419520 +wind surfing 40 -8831091081349758976 +wind surfing 41 3722 +wind surfing 44 3462 +wind surfing 45 8287522765741301760 +wind surfing 48 7414865343000322048 +wind surfing 52 8524940073536954368 +wind surfing 53 1856 +wind surfing 54 8100036735858401280 +wind surfing 58 7517159036469575680 +wind surfing 59 3613 +wind surfing 62 -8330233444291084288 +wind surfing 63 501 +wind surfing 64 7490717730239250432 +wind surfing 66 -7840338174858199040 +wind surfing 68 8455496814886002688 +wind surfing 71 9085381906890203136 +wind surfing 75 8825059717746376704 +wind surfing 78 6923604860394528768 +wind surfing 86 9169248521377374208 +wind surfing 88 1132 +wind surfing 92 1530 +wind surfing 96 8142241016679735296 +wind surfing 101 -7848043121524228096 +wind surfing 102 2649 +wind surfing 107 1032 +wind surfing 108 8508401924853850112 +wind surfing 111 8000440057238052864 +wind surfing 112 7077311975029555200 +wind surfing 121 3579 +xylophone band NULL 3401 +xylophone band -115 -7000925438663041024 +xylophone band -112 -8705403811649355776 +xylophone band -109 -7804116532814151680 +xylophone band -104 3510 +xylophone band -101 2786 +xylophone band -98 7060236714847412224 +xylophone band -96 8984935029383389184 +xylophone band -94 -7380731416973295616 +xylophone band -91 914 +xylophone band -78 8222714144797368320 +xylophone band -75 3084 +xylophone band -72 -7642381493746483200 +xylophone band -70 -8270479187688816640 +xylophone band -68 -8768744394742235136 +xylophone band -59 1845 +xylophone band -46 -7052619594823221248 +xylophone band -27 618 +xylophone band -26 3397 +xylophone band -23 8837420822750314496 +xylophone band -15 -9051477157204770816 +xylophone band -13 8708232769657815040 +xylophone band -10 -7404052043914526720 +xylophone band -6 8410599906334097408 +xylophone band -3 2803 +xylophone band -2 -7989766326847807488 +xylophone band 0 2988 +xylophone band 3 8286706213485297664 +xylophone band 6 1676 +xylophone band 7 7065344324692443136 +xylophone band 9 -7262798781688651776 +xylophone band 11 7500716020874674176 +xylophone band 14 -7881351200983613440 +xylophone band 17 7778936842502275072 +xylophone band 19 2984 +xylophone band 22 -7551394356730339328 +xylophone band 23 7675009476762918912 +xylophone band 33 311 +xylophone band 36 -6917607783359897600 +xylophone band 38 3067 +xylophone band 40 8129551357032259584 +xylophone band 52 -7687052294777208832 +xylophone band 60 1541 +xylophone band 62 8365058996333953024 +xylophone band 77 9209153648361848832 +xylophone band 79 1307 +xylophone band 84 1398 +xylophone band 85 7592440105065308160 +xylophone band 88 584 +xylophone band 92 -7399631791131074560 +xylophone band 94 8643198489997254656 +xylophone band 107 -8357136656913686528 +xylophone band 108 
2108 +xylophone band 112 7921639119138070528 +xylophone band 118 1728 +xylophone band 122 9207107990561972224 +xylophone band 123 8677794924343164928 +xylophone band 125 263 +xylophone band 127 NULL +yard duty NULL 1972 +yard duty -127 2719 +yard duty -115 8279056098670198784 +yard duty -114 3747 +yard duty -109 2791 +yard duty -103 -8659692318743314432 +yard duty -102 NULL +yard duty -100 7309156463509061632 +yard duty -98 7492436934952574976 +yard duty -93 NULL +yard duty -91 -7094189393339678720 +yard duty -88 8145745969573666816 +yard duty -86 3606 +yard duty -85 NULL +yard duty -82 -8191825921746305024 +yard duty -76 3563 +yard duty -62 71 +yard duty -61 -7541860097718902784 +yard duty -57 8059284960252731392 +yard duty -53 2843 +yard duty -51 -7686220526274502656 +yard duty -50 8220104397160169472 +yard duty -49 NULL +yard duty -48 9040958359122640896 +yard duty -46 8641221723991433216 +yard duty -45 9139805788041134080 +yard duty -44 1065 +yard duty -33 1075 +yard duty -28 -7444070205513138176 +yard duty -27 7199539820886958080 +yard duty -21 2092 +yard duty -19 8656571350884048896 +yard duty -17 NULL +yard duty -15 1481 +yard duty -1 -9012093603044245504 +yard duty 8 -7194281951646187520 +yard duty 9 -8345065519816695808 +yard duty 10 3212 +yard duty 15 8945004737083555840 +yard duty 18 3901 +yard duty 22 -7109790267244814336 +yard duty 25 1030 +yard duty 28 7220581538170413056 +yard duty 30 7394967727502467072 +yard duty 34 -7858505678035951616 +yard duty 42 3724 +yard duty 48 8780196485890555904 +yard duty 53 590 +yard duty 55 8417381121663746048 +yard duty 57 677 +yard duty 60 1781 +yard duty 64 -8084716955963252736 +yard duty 65 735 +yard duty 78 -8275337702906757120 +yard duty 86 2688 +yard duty 90 -7692192232238678016 +yard duty 102 2004 +yard duty 105 80 +yard duty 110 7153922334283776000 +zync studies NULL 8962097525980225536 +zync studies -127 3079 +zync studies -117 -9219066990552760320 +zync studies -116 1542 +zync studies -111 1681 +zync studies -105 8665969966920990720 +zync studies -103 8509508263705477120 +zync studies -102 8900545829211299840 +zync studies -94 2563 +zync studies -90 8160569434550403072 +zync studies -83 8213810702473183232 +zync studies -79 -7152177800841502720 +zync studies -78 8235179243092090880 +zync studies -68 1870 +zync studies -61 -6974654664348033024 +zync studies -58 -8859107121649893376 +zync studies -57 3213 +zync studies -54 8555948987770511360 +zync studies -52 3462 +zync studies -50 2016 +zync studies -45 -8300764106868350976 +zync studies -40 3244 +zync studies -39 8099215208813903872 +zync studies -38 2514 +zync studies -37 2412 +zync studies -36 412 +zync studies -35 9000633029632499712 +zync studies -34 579 +zync studies -33 -8086577583338061824 +zync studies -32 2540 +zync studies -27 7936149988210212864 +zync studies -26 -8453491903284994048 +zync studies -21 296 +zync studies -20 9104574294205636608 +zync studies -12 2325 +zync studies 1 3841 +zync studies 7 1127 +zync studies 9 2878 +zync studies 11 279 +zync studies 15 8731960288562044928 +zync studies 19 -8714995808835444736 +zync studies 31 2306 +zync studies 37 2979 +zync studies 48 2205 +zync studies 59 NULL +zync studies 63 -8877431933441327104 +zync studies 70 8294315622451740672 +zync studies 87 -9203942396257984512 +zync studies 90 2850 +zync studies 91 8091421389575282688 +zync studies 98 1252 +zync studies 99 1608 +zync studies 100 8536948829863198720 +zync studies 116 9073672806863790080 +zync studies 120 7845953007588401152 diff --git 
a/ql/src/test/results/clientpositive/tez/vector_left_outer_join.q.out b/ql/src/test/results/clientpositive/tez/vector_left_outer_join.q.out index 6dfcd83..71367c4 100644 --- a/ql/src/test/results/clientpositive/tez/vector_left_outer_join.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_left_outer_join.q.out @@ -54,6 +54,8 @@ STAGE PLANS: 0 cint (type: int) 1 cint (type: int) outputColumnNames: _col0 + input vertices: + 1 Map 4 Statistics: Num rows: 13516 Data size: 414960 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -64,6 +66,8 @@ STAGE PLANS: keys: 0 _col0 (type: tinyint) 1 ctinyint (type: tinyint) + input vertices: + 1 Map 1 Statistics: Num rows: 14867 Data size: 456456 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 14867 Data size: 456456 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out index 27423b2..4b35231 100644 --- a/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_mapjoin_reduce.q.out @@ -204,6 +204,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 l_partkey (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 1 Map 1 Statistics: Num rows: 831 Data size: 3326 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -215,6 +217,8 @@ STAGE PLANS: 0 _col1 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col3 + input vertices: + 1 Map 4 Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col3 (type: int) @@ -363,6 +367,8 @@ STAGE PLANS: 0 _col0 (type: int) 1 l_partkey (type: int) outputColumnNames: _col0, _col1, _col3 + input vertices: + 1 Map 1 Statistics: Num rows: 831 Data size: 3326 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -374,6 +380,8 @@ STAGE PLANS: 0 _col1 (type: int), 1 (type: int) 1 _col0 (type: int), _col1 (type: int) outputColumnNames: _col0, _col3 + input vertices: + 1 Map 4 Statistics: Num rows: 914 Data size: 3658 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col3 (type: int) diff --git a/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out b/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out new file mode 100644 index 0000000..9b7e8f6 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vector_non_string_partition.q.out @@ -0,0 +1,182 @@ +PREHOOK: query: CREATE TABLE non_string_part(cint INT, cstring1 STRING, cdouble DOUBLE, ctimestamp1 TIMESTAMP) PARTITIONED BY (ctinyint tinyint) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@non_string_part +POSTHOOK: query: CREATE TABLE non_string_part(cint INT, cstring1 STRING, cdouble DOUBLE, ctimestamp1 TIMESTAMP) PARTITIONED BY (ctinyint tinyint) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@non_string_part +PREHOOK: query: INSERT OVERWRITE TABLE non_string_part PARTITION(ctinyint) SELECT cint, cstring1, cdouble, ctimestamp1, ctinyint fROM alltypesorc +WHERE ctinyint IS NULL AND cdouble IS NOT NULL ORDER BY cdouble +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@non_string_part +POSTHOOK: query: INSERT OVERWRITE TABLE non_string_part PARTITION(ctinyint) 
SELECT cint, cstring1, cdouble, ctimestamp1, ctinyint fROM alltypesorc +WHERE ctinyint IS NULL AND cdouble IS NOT NULL ORDER BY cdouble +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@non_string_part@ctinyint=__HIVE_DEFAULT_PARTITION__ +POSTHOOK: Lineage: non_string_part PARTITION(ctinyint=__HIVE_DEFAULT_PARTITION__).cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: non_string_part PARTITION(ctinyint=__HIVE_DEFAULT_PARTITION__).cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: non_string_part PARTITION(ctinyint=__HIVE_DEFAULT_PARTITION__).cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: non_string_part PARTITION(ctinyint=__HIVE_DEFAULT_PARTITION__).ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] +PREHOOK: query: SHOW PARTITIONS non_string_part +PREHOOK: type: SHOWPARTITIONS +PREHOOK: Input: default@non_string_part +POSTHOOK: query: SHOW PARTITIONS non_string_part +POSTHOOK: type: SHOWPARTITIONS +POSTHOOK: Input: default@non_string_part +ctinyint=__HIVE_DEFAULT_PARTITION__ +PREHOOK: query: EXPLAIN SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: non_string_part + Statistics: Num rows: 3073 Data size: 339150 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (cint > 0) (type: boolean) + Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), ctinyint (type: tinyint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: tinyint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: tinyint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 1100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 1100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@non_string_part +PREHOOK: Input: default@non_string_part@ctinyint=__HIVE_DEFAULT_PARTITION__ +#### A masked pattern 
was here #### +POSTHOOK: query: SELECT cint, ctinyint FROM non_string_part WHERE cint > 0 ORDER BY cint LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@non_string_part +POSTHOOK: Input: default@non_string_part@ctinyint=__HIVE_DEFAULT_PARTITION__ +#### A masked pattern was here #### +762 NULL +762 NULL +6981 NULL +6981 NULL +6981 NULL +86028 NULL +504142 NULL +799471 NULL +1248059 NULL +1286921 NULL +PREHOOK: query: EXPLAIN SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: non_string_part + Statistics: Num rows: 3073 Data size: 339150 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (cint > 0) (type: boolean) + Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int), cstring1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: string) + sort order: ++ + Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1024 Data size: 113013 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 1100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 1100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@non_string_part +PREHOOK: Input: default@non_string_part@ctinyint=__HIVE_DEFAULT_PARTITION__ +#### A masked pattern was here #### +POSTHOOK: query: SELECT cint, cstring1 FROM non_string_part WHERE cint > 0 ORDER BY cint, cstring1 LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@non_string_part +POSTHOOK: Input: default@non_string_part@ctinyint=__HIVE_DEFAULT_PARTITION__ +#### A masked pattern was here #### +762 3WsVeqb28VWEEOLI8ail +762 40ks5556SV +6981 1FNNhmiFLGw425NA13g +6981 o5mb0QP5Y48Qd4vdB0 +6981 sF2CRfgt2K +86028 T2o8XRFAL0HC4ikDQnfoCymw +504142 PlOxor04p5cvVl +799471 2fu24 +1248059 Uhps6mMh3IfHB3j7yH62K +1286921 ODLrXI8882q8LS8 diff --git a/ql/src/test/results/clientpositive/tez/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/tez/vector_orderby_5.q.out new file mode 100644 index 0000000..8a4d25a --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vector_orderby_5.q.out @@ -0,0 +1,193 @@ +PREHOOK: query: create table 
vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2korc +PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2k +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2k +POSTHOOK: Output: default@vectortab2korc +POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: 
vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +PREHOOK: query: explain +select bo, max(b) from vectortab2korc group by bo order by bo desc +PREHOOK: type: QUERY +POSTHOOK: query: explain +select bo, max(b) from vectortab2korc group by bo order by bo desc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: vectortab2korc + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: bo (type: boolean), b (type: bigint) + outputColumnNames: bo, b + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(b) + keys: bo (type: boolean) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: boolean) + sort order: + + Map-reduce partition columns: _col0 (type: boolean) + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + keys: KEY._col0 (type: boolean) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: boolean), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: boolean) + sort order: - + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: boolean), VALUE._col0 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select bo, max(b) from vectortab2korc group by bo order by bo desc +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### +POSTHOOK: query: select bo, max(b) from vectortab2korc group by bo order by bo desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### +true 9211455920344088576 +false 9209153648361848832 +NULL 9180098147855769600 diff --git a/ql/src/test/results/clientpositive/tez/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/tez/vector_varchar_4.q.out new file 
mode 100644 index 0000000..02fa042 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vector_varchar_4.q.out @@ -0,0 +1,175 @@ +PREHOOK: query: drop table if exists vectortab2k +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists vectortab2k +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists vectortab2korc +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists vectortab2korc +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2korc +PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2k +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2k +POSTHOOK: Output: default@vectortab2korc +POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: 
vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +PREHOOK: query: drop table if exists varchar_lazy_binary_columnar +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists varchar_lazy_binary_columnar +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table varchar_lazy_binary_columnar(vt varchar(10), vsi varchar(10), vi varchar(20), vb varchar(30), vf varchar(20),vd varchar(20),vs varchar(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@varchar_lazy_binary_columnar +POSTHOOK: query: create table varchar_lazy_binary_columnar(vt varchar(10), vsi varchar(10), vi varchar(20), vb varchar(30), vf varchar(20),vd varchar(20),vs varchar(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@varchar_lazy_binary_columnar +PREHOOK: query: explain +insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: vectortab2korc + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: CAST( t AS varchar(10)) (type: varchar(10)), CAST( si AS varchar(10)) (type: varchar(10)), CAST( i AS varchar(20)) (type: varchar(20)), CAST( b AS varchar(30)) (type: varchar(30)), CAST( f AS varchar(20)) (type: varchar(20)), CAST( d AS varchar(20)) (type: varchar(20)), CAST( s AS varchar(50)) (type: varchar(50)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe + name: default.varchar_lazy_binary_columnar + Execution mode: vectorized + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: 
org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe + name: default.varchar_lazy_binary_columnar + + Stage: Stage-3 + Stats-Aggr Operator + diff --git a/ql/src/test/results/clientpositive/tez/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/tez/vector_varchar_simple.q.out index f097414..f3d9147 100644 --- a/ql/src/test/results/clientpositive/tez/vector_varchar_simple.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_varchar_simple.q.out @@ -1,31 +1,31 @@ -PREHOOK: query: drop table char_2 +PREHOOK: query: drop table varchar_2 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table char_2 +POSTHOOK: query: drop table varchar_2 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table char_2 ( +PREHOOK: query: create table varchar_2 ( key varchar(10), value varchar(20) ) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@char_2 -POSTHOOK: query: create table char_2 ( +PREHOOK: Output: default@varchar_2 +POSTHOOK: query: create table varchar_2 ( key varchar(10), value varchar(20) ) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@char_2 -PREHOOK: query: insert overwrite table char_2 select * from src +POSTHOOK: Output: default@varchar_2 +PREHOOK: query: insert overwrite table varchar_2 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@char_2 -POSTHOOK: query: insert overwrite table char_2 select * from src +PREHOOK: Output: default@varchar_2 +POSTHOOK: query: insert overwrite table varchar_2 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@char_2 -POSTHOOK: Lineage: char_2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: char_2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@varchar_2 +POSTHOOK: Lineage: varchar_2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: varchar_2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: select key, value from src order by key asc @@ -46,12 +46,12 @@ POSTHOOK: Input: default@src 10 val_10 100 val_100 PREHOOK: query: explain select key, value -from char_2 +from varchar_2 order by key asc limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain select key, value -from char_2 +from varchar_2 order by key asc limit 5 POSTHOOK: type: QUERY @@ -69,7 +69,7 @@ STAGE PLANS: Map 1 Map Operator Tree: TableScan - alias: char_2 + alias: varchar_2 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: varchar(10)), value (type: varchar(20)) @@ -107,19 +107,19 @@ STAGE PLANS: PREHOOK: query: -- should match the query from src select key, value -from char_2 +from varchar_2 order by key asc limit 5 PREHOOK: type: QUERY -PREHOOK: Input: default@char_2 +PREHOOK: Input: default@varchar_2 #### A masked pattern was here #### POSTHOOK: query: -- should match the query from src select key, value -from char_2 +from varchar_2 order by key asc limit 5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@char_2 +POSTHOOK: Input: default@varchar_2 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -146,12 +146,12 @@ POSTHOOK: Input: default@src 97 val_97 96 val_96 PREHOOK: query: explain select key, value -from char_2 +from varchar_2 order by key desc limit 5 PREHOOK: type: QUERY POSTHOOK: 
query: explain select key, value -from char_2 +from varchar_2 order by key desc limit 5 POSTHOOK: type: QUERY @@ -169,7 +169,7 @@ STAGE PLANS: Map 1 Map Operator Tree: TableScan - alias: char_2 + alias: varchar_2 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: varchar(10)), value (type: varchar(20)) @@ -207,30 +207,136 @@ STAGE PLANS: PREHOOK: query: -- should match the query from src select key, value -from char_2 +from varchar_2 order by key desc limit 5 PREHOOK: type: QUERY -PREHOOK: Input: default@char_2 +PREHOOK: Input: default@varchar_2 #### A masked pattern was here #### POSTHOOK: query: -- should match the query from src select key, value -from char_2 +from varchar_2 order by key desc limit 5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@char_2 +POSTHOOK: Input: default@varchar_2 #### A masked pattern was here #### 98 val_98 98 val_98 97 val_97 97 val_97 96 val_96 -PREHOOK: query: drop table char_2 +PREHOOK: query: drop table varchar_2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@char_2 -PREHOOK: Output: default@char_2 -POSTHOOK: query: drop table char_2 +PREHOOK: Input: default@varchar_2 +PREHOOK: Output: default@varchar_2 +POSTHOOK: query: drop table varchar_2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@char_2 -POSTHOOK: Output: default@char_2 +POSTHOOK: Input: default@varchar_2 +POSTHOOK: Output: default@varchar_2 +PREHOOK: query: -- Implicit conversion. Occurs in reduce-side under Tez. +create table varchar_3 ( + field varchar(25) +) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@varchar_3 +POSTHOOK: query: -- Implicit conversion. Occurs in reduce-side under Tez. +create table varchar_3 ( + field varchar(25) +) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@varchar_3 +PREHOOK: query: explain +insert into table varchar_3 select cint from alltypesorc limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert into table varchar_3 select cint from alltypesorc limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: int) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: CAST( _col0 AS varchar(25)) (type: varchar(25)) + outputColumnNames: _col0 + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: 
NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.varchar_3 + Execution mode: vectorized + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.varchar_3 + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert into table varchar_3 select cint from alltypesorc limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@varchar_3 +POSTHOOK: query: insert into table varchar_3 select cint from alltypesorc limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@varchar_3 +POSTHOOK: Lineage: varchar_3.field EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +PREHOOK: query: drop table varchar_3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@varchar_3 +PREHOOK: Output: default@varchar_3 +POSTHOOK: query: drop table varchar_3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@varchar_3 +POSTHOOK: Output: default@varchar_3 diff --git a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out new file mode 100644 index 0000000..7703158 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out @@ -0,0 +1,1127 @@ +PREHOOK: query: -- Use ORDER BY clauses to generate 2 stages. +EXPLAIN +SELECT MIN(ctinyint) as c1, + MAX(ctinyint), + COUNT(ctinyint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: -- Use ORDER BY clauses to generate 2 stages. 
+EXPLAIN +SELECT MIN(ctinyint) as c1, + MAX(ctinyint), + COUNT(ctinyint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: ctinyint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ctinyint), max(ctinyint), count(ctinyint), count() + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: tinyint), _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: tinyint), _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: tinyint), VALUE._col0 (type: tinyint), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT MIN(ctinyint) as c1, + MAX(ctinyint), + COUNT(ctinyint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT MIN(ctinyint) as c1, + MAX(ctinyint), + COUNT(ctinyint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-64 62 9173 12288 +PREHOOK: query: EXPLAIN +SELECT SUM(ctinyint) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT SUM(ctinyint) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + 
+STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: ctinyint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(ctinyint) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT SUM(ctinyint) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(ctinyint) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-39856 +PREHOOK: query: EXPLAIN +SELECT + avg(ctinyint) as c1, + variance(ctinyint), + var_pop(ctinyint), + var_samp(ctinyint), + std(ctinyint), + stddev(ctinyint), + stddev_pop(ctinyint), + stddev_samp(ctinyint) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + avg(ctinyint) as c1, + variance(ctinyint), + var_pop(ctinyint), + var_samp(ctinyint), + std(ctinyint), + stddev(ctinyint), + stddev_pop(ctinyint), + stddev_samp(ctinyint) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: ctinyint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + 
aggregations: avg(ctinyint), variance(ctinyint), var_pop(ctinyint), var_samp(ctinyint), std(ctinyint), stddev(ctinyint), stddev_pop(ctinyint), stddev_samp(ctinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct), _col3 (type: struct), _col4 (type: struct), _col5 (type: struct), _col6 (type: struct), _col7 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + avg(ctinyint) as c1, + variance(ctinyint), + var_pop(ctinyint), + var_samp(ctinyint), + std(ctinyint), + stddev(ctinyint), + stddev_pop(ctinyint), + stddev_samp(ctinyint) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + avg(ctinyint) as c1, + variance(ctinyint), + var_pop(ctinyint), + var_samp(ctinyint), + std(ctinyint), + stddev(ctinyint), + stddev_pop(ctinyint), + stddev_samp(ctinyint) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-4.344925324321378 1158.3003004768184 1158.3003004768184 1158.4265870337827 34.033811136527426 34.033811136527426 34.033811136527426 34.03566639620536 +PREHOOK: query: EXPLAIN +SELECT MIN(cbigint) as c1, + 
MAX(cbigint), + COUNT(cbigint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT MIN(cbigint) as c1, + MAX(cbigint), + COUNT(cbigint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint) + outputColumnNames: cbigint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(cbigint), max(cbigint), count(cbigint), count() + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT MIN(cbigint) as c1, + MAX(cbigint), + COUNT(cbigint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT MIN(cbigint) as c1, + MAX(cbigint), + COUNT(cbigint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-2147311592 2145498388 9173 12288 +PREHOOK: query: EXPLAIN +SELECT SUM(cbigint) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT SUM(cbigint) as c1 +FROM alltypesorc +ORDER BY c1 
+POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint) + outputColumnNames: cbigint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(cbigint) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT SUM(cbigint) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(cbigint) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-1698460028409 +PREHOOK: query: EXPLAIN +SELECT + avg(cbigint) as c1, + variance(cbigint), + var_pop(cbigint), + var_samp(cbigint), + std(cbigint), + stddev(cbigint), + stddev_pop(cbigint), + stddev_samp(cbigint) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + avg(cbigint) as c1, + variance(cbigint), + var_pop(cbigint), + var_samp(cbigint), + std(cbigint), + stddev(cbigint), + stddev_pop(cbigint), + stddev_samp(cbigint) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint) + outputColumnNames: cbigint + Statistics: Num rows: 
12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(cbigint), variance(cbigint), var_pop(cbigint), var_samp(cbigint), std(cbigint), stddev(cbigint), stddev_pop(cbigint), stddev_samp(cbigint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct), _col3 (type: struct), _col4 (type: struct), _col5 (type: struct), _col6 (type: struct), _col7 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + avg(cbigint) as c1, + variance(cbigint), + var_pop(cbigint), + var_samp(cbigint), + std(cbigint), + stddev(cbigint), + stddev_pop(cbigint), + stddev_samp(cbigint) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + avg(cbigint) as c1, + variance(cbigint), + var_pop(cbigint), + var_samp(cbigint), + std(cbigint), + stddev(cbigint), + stddev_pop(cbigint), + stddev_samp(cbigint) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-1.8515862077935246E8 2.07689300543081907E18 2.07689300543081907E18 2.07711944383088768E18 1.441142951074188E9 1.441142951074188E9 
1.441142951074188E9 1.4412215110214279E9 +PREHOOK: query: EXPLAIN +SELECT MIN(cfloat) as c1, + MAX(cfloat), + COUNT(cfloat), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT MIN(cfloat) as c1, + MAX(cfloat), + COUNT(cfloat), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cfloat (type: float) + outputColumnNames: cfloat + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(cfloat), max(cfloat), count(cfloat), count() + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: float), _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: float) + sort order: + + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: float), VALUE._col0 (type: float), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT MIN(cfloat) as c1, + MAX(cfloat), + COUNT(cfloat), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT MIN(cfloat) as c1, + MAX(cfloat), + COUNT(cfloat), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-64.0 79.553 9173 12288 +PREHOOK: query: EXPLAIN +SELECT SUM(cfloat) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN 
+SELECT SUM(cfloat) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cfloat (type: float) + outputColumnNames: cfloat + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(cfloat) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: double) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT SUM(cfloat) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(cfloat) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-39479.635992884636 +PREHOOK: query: EXPLAIN +SELECT + avg(cfloat) as c1, + variance(cfloat), + var_pop(cfloat), + var_samp(cfloat), + std(cfloat), + stddev(cfloat), + stddev_pop(cfloat), + stddev_samp(cfloat) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + avg(cfloat) as c1, + variance(cfloat), + var_pop(cfloat), + var_samp(cfloat), + std(cfloat), + stddev(cfloat), + stddev_pop(cfloat), + stddev_samp(cfloat) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cfloat (type: float) + 
outputColumnNames: cfloat + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(cfloat), variance(cfloat), var_pop(cfloat), var_samp(cfloat), std(cfloat), stddev(cfloat), stddev_pop(cfloat), stddev_samp(cfloat) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct), _col3 (type: struct), _col4 (type: struct), _col5 (type: struct), _col6 (type: struct), _col7 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + Reducer 3 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + avg(cfloat) as c1, + variance(cfloat), + var_pop(cfloat), + var_samp(cfloat), + std(cfloat), + stddev(cfloat), + stddev_pop(cfloat), + stddev_samp(cfloat) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + avg(cfloat) as c1, + variance(cfloat), + var_pop(cfloat), + var_samp(cfloat), + std(cfloat), + stddev(cfloat), + stddev_pop(cfloat), + stddev_samp(cfloat) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-4.303895780321011 1163.8972588604984 1163.8972588604984 1164.0241556397025 34.115938487171924 34.115938487171924 
34.115938487171924 34.11779822379666 +WARNING: Comparing a bigint and a double may result in a loss of precision. +PREHOOK: query: EXPLAIN +SELECT AVG(cbigint), + (-(AVG(cbigint))), + (-6432 + AVG(cbigint)), + STDDEV_POP(cbigint), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))), + VAR_SAMP(cbigint), + (-((-6432 + AVG(cbigint)))), + (-6432 + (-((-6432 + AVG(cbigint))))), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))), + COUNT(*), + SUM(cfloat), + (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)), + (-(VAR_SAMP(cbigint))), + ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))), + MIN(ctinyint), + (-(MIN(ctinyint))) +FROM alltypesorc +WHERE (((cstring2 LIKE '%b%') + OR ((79.553 != cint) + OR (cbigint < cdouble))) + OR ((ctinyint >= csmallint) + AND ((cboolean2 = 1) + AND (3569 = ctinyint)))) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT AVG(cbigint), + (-(AVG(cbigint))), + (-6432 + AVG(cbigint)), + STDDEV_POP(cbigint), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))), + VAR_SAMP(cbigint), + (-((-6432 + AVG(cbigint)))), + (-6432 + (-((-6432 + AVG(cbigint))))), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))), + COUNT(*), + SUM(cfloat), + (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)), + (-(VAR_SAMP(cbigint))), + ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))), + MIN(ctinyint), + (-(MIN(ctinyint))) +FROM alltypesorc +WHERE (((cstring2 LIKE '%b%') + OR ((79.553 != cint) + OR (cbigint < cdouble))) + OR ((ctinyint >= csmallint) + AND ((cboolean2 = 1) + AND (3569 = ctinyint)))) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((cstring2 like '%b%') or ((79.553 <> cint) or (cbigint < cdouble))) (type: boolean) + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint) + outputColumnNames: cbigint, cfloat, ctinyint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(cbigint), stddev_pop(cbigint), var_samp(cbigint), count(), sum(cfloat), min(ctinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct), _col3 (type: bigint), _col4 (type: double), _col5 (type: tinyint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), stddev_pop(VALUE._col1), var_samp(VALUE._col2), count(VALUE._col3), sum(VALUE._col4), min(VALUE._col5) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double), (- _col0) (type: double), (-6432 + _col0) (type: double), _col1 
(type: double), (- (-6432 + _col0)) (type: double), ((- (-6432 + _col0)) + (-6432 + _col0)) (type: double), _col2 (type: double), (- (-6432 + _col0)) (type: double), (-6432 + (- (-6432 + _col0))) (type: double), (- (-6432 + _col0)) (type: double), ((- (-6432 + _col0)) / (- (-6432 + _col0))) (type: double), _col3 (type: bigint), _col4 (type: double), (_col2 % _col1) (type: double), (- _col2) (type: double), ((- (-6432 + _col0)) * (- _col0)) (type: double), _col5 (type: tinyint), (- _col5) (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +WARNING: Comparing a bigint and a double may result in a loss of precision. +PREHOOK: query: SELECT AVG(cbigint), + (-(AVG(cbigint))), + (-6432 + AVG(cbigint)), + STDDEV_POP(cbigint), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))), + VAR_SAMP(cbigint), + (-((-6432 + AVG(cbigint)))), + (-6432 + (-((-6432 + AVG(cbigint))))), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))), + COUNT(*), + SUM(cfloat), + (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)), + (-(VAR_SAMP(cbigint))), + ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))), + MIN(ctinyint), + (-(MIN(ctinyint))) +FROM alltypesorc +WHERE (((cstring2 LIKE '%b%') + OR ((79.553 != cint) + OR (cbigint < cdouble))) + OR ((ctinyint >= csmallint) + AND ((cboolean2 = 1) + AND (3569 = ctinyint)))) +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT AVG(cbigint), + (-(AVG(cbigint))), + (-6432 + AVG(cbigint)), + STDDEV_POP(cbigint), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))), + VAR_SAMP(cbigint), + (-((-6432 + AVG(cbigint)))), + (-6432 + (-((-6432 + AVG(cbigint))))), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))), + COUNT(*), + SUM(cfloat), + (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)), + (-(VAR_SAMP(cbigint))), + ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))), + MIN(ctinyint), + (-(MIN(ctinyint))) +FROM alltypesorc +WHERE (((cstring2 LIKE '%b%') + OR ((79.553 != cint) + OR (cbigint < cdouble))) + OR ((ctinyint >= csmallint) + AND ((cboolean2 = 1) + AND (3569 = ctinyint)))) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-3.875652215945533E8 3.875652215945533E8 -3.875716535945533E8 1.436387455459401E9 3.875716535945533E8 0.0 2.06347151720204902E18 3.875716535945533E8 3.875652215945533E8 3.875716535945533E8 1.0 10934 -37224.52399241924 1.0517370547117279E9 -2.06347151720204902E18 1.5020929380914048E17 -64 64 diff --git a/ql/src/test/results/clientpositive/tez/vectorization_1.q.out b/ql/src/test/results/clientpositive/tez/vectorization_1.q.out new file mode 100644 index 0000000..6f428e1 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorization_1.q.out @@ -0,0 +1,45 @@ +PREHOOK: query: SELECT VAR_POP(ctinyint), + 
(VAR_POP(ctinyint) / -26.28), + SUM(cfloat), + (-1.389 + SUM(cfloat)), + (SUM(cfloat) * (-1.389 + SUM(cfloat))), + MAX(ctinyint), + (-((SUM(cfloat) * (-1.389 + SUM(cfloat))))), + MAX(cint), + (MAX(cint) * 79.553), + VAR_SAMP(cdouble), + (10.175 % (-((SUM(cfloat) * (-1.389 + SUM(cfloat)))))), + COUNT(cint), + (-563 % MAX(cint)) +FROM alltypesorc +WHERE (((cdouble > ctinyint) + AND (cboolean2 > 0)) + OR ((cbigint < ctinyint) + OR ((cint > cbigint) + OR (cboolean1 < 0)))) +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT VAR_POP(ctinyint), + (VAR_POP(ctinyint) / -26.28), + SUM(cfloat), + (-1.389 + SUM(cfloat)), + (SUM(cfloat) * (-1.389 + SUM(cfloat))), + MAX(ctinyint), + (-((SUM(cfloat) * (-1.389 + SUM(cfloat))))), + MAX(cint), + (MAX(cint) * 79.553), + VAR_SAMP(cdouble), + (10.175 % (-((SUM(cfloat) * (-1.389 + SUM(cfloat)))))), + COUNT(cint), + (-563 % MAX(cint)) +FROM alltypesorc +WHERE (((cdouble > ctinyint) + AND (cboolean2 > 0)) + OR ((cbigint < ctinyint) + OR ((cint > cbigint) + OR (cboolean1 < 0)))) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +1074.830257547229 -40.89917266161449 -21997.674998402596 -21999.063998402595 4.839282601059194E8 62 -4.839282601059194E8 1073680599 8.5414512692247E10 7.569848642620903E10 10.175 3745 -563 diff --git a/ql/src/test/results/clientpositive/tez/vectorization_10.q.out b/ql/src/test/results/clientpositive/tez/vectorization_10.q.out new file mode 100644 index 0000000..a2f3ce0 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorization_10.q.out @@ -0,0 +1,294 @@ +PREHOOK: query: SELECT cdouble, + ctimestamp1, + ctinyint, + cboolean1, + cstring1, + (-(cdouble)), + (cdouble + csmallint), + ((cdouble + csmallint) % 33), + (-(cdouble)), + (ctinyint % cdouble), + (ctinyint % csmallint), + (-(cdouble)), + (cbigint * (ctinyint % csmallint)), + (9763215.5639 - (cdouble + csmallint)), + (-((-(cdouble)))) +FROM alltypesorc +WHERE (((cstring2 <= '10') + OR ((ctinyint > cdouble) + AND (-5638.15 >= ctinyint))) + OR ((cdouble > 6981) + AND ((csmallint = 9763215.5639) + OR (cstring1 LIKE '%a')))) +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT cdouble, + ctimestamp1, + ctinyint, + cboolean1, + cstring1, + (-(cdouble)), + (cdouble + csmallint), + ((cdouble + csmallint) % 33), + (-(cdouble)), + (ctinyint % cdouble), + (ctinyint % csmallint), + (-(cdouble)), + (cbigint * (ctinyint % csmallint)), + (9763215.5639 - (cdouble + csmallint)), + (-((-(cdouble)))) +FROM alltypesorc +WHERE (((cstring2 <= '10') + OR ((ctinyint > cdouble) + AND (-5638.15 >= ctinyint))) + OR ((cdouble > 6981) + AND ((csmallint = 9763215.5639) + OR (cstring1 LIKE '%a')))) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-200.0 1969-12-31 15:59:51.342 60 NULL NULL 200.0 -400.0 -4.0 200.0 60.0 60 200.0 118868432400 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:53.476 -22 NULL NULL 200.0 -400.0 -4.0 200.0 -22.0 -22 200.0 -3315653088 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:07.731 44 NULL NULL 200.0 -400.0 -4.0 200.0 44.0 44 200.0 -59205151456 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:53.743 25 NULL NULL 200.0 -400.0 -4.0 200.0 25.0 25 200.0 46547828825 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:09.883 46 NULL NULL 200.0 -400.0 -4.0 200.0 46.0 46 200.0 -20096868102 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:15.143 16 NULL NULL 200.0 -400.0 
-4.0 200.0 16.0 16 200.0 -33756365728 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:02.972 -58 NULL NULL 200.0 -400.0 -4.0 200.0 -58.0 -58 200.0 60297449542 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:11.15 -30 NULL NULL 200.0 -400.0 -4.0 200.0 -30.0 -30 200.0 25029255630 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:03.794 13 NULL NULL 200.0 -400.0 -4.0 200.0 13.0 13 200.0 11630250073 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:49.331 -46 NULL NULL 200.0 -400.0 -4.0 200.0 -46.0 -46 200.0 93596894876 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:47.828 -10 NULL NULL 200.0 -400.0 -4.0 200.0 -10.0 -10 200.0 7156607330 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:06.3 14 NULL NULL 200.0 -400.0 -4.0 200.0 14.0 14 200.0 811814206 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:10.601 55 NULL NULL 200.0 -400.0 -4.0 200.0 55.0 55 200.0 110230625780 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:08.046 -33 NULL NULL 200.0 -400.0 -4.0 200.0 -33.0 -33 200.0 -9274988019 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:57.47 6 NULL NULL 200.0 -400.0 -4.0 200.0 6.0 6 200.0 -7015614564 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:04.967 62 NULL NULL 200.0 -400.0 -4.0 200.0 62.0 62 200.0 61311056 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:00.893 22 NULL NULL 200.0 -400.0 -4.0 200.0 22.0 22 200.0 -39924557090 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:54.866 -26 NULL NULL 200.0 -400.0 -4.0 200.0 -26.0 -26 200.0 7325400810 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:15.296 -59 NULL NULL 200.0 -400.0 -4.0 200.0 -59.0 -59 200.0 -9757710398 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:09.539 7 NULL NULL 200.0 -400.0 -4.0 200.0 7.0 7 200.0 9345007252 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:57.349 -56 NULL NULL 200.0 -400.0 -4.0 200.0 -56.0 -56 200.0 -34216461496 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:09.111 -37 NULL NULL 200.0 -400.0 -4.0 200.0 -37.0 -37 200.0 -9024569730 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:03.333 -44 NULL NULL 200.0 -400.0 -4.0 200.0 -44.0 -44 200.0 79435713324 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:05.377 -52 NULL NULL 200.0 -400.0 -4.0 200.0 -52.0 -52 200.0 -97123836836 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:48.188 18 NULL NULL 200.0 -400.0 -4.0 200.0 18.0 18 200.0 -15013264662 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:15.091 -43 NULL NULL 200.0 -400.0 -4.0 200.0 -43.0 -43 200.0 -46861099946 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:55.829 0 NULL NULL 200.0 -400.0 -4.0 200.0 0.0 0 200.0 0 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:46.076 9 NULL NULL 200.0 -400.0 -4.0 200.0 9.0 9 200.0 -14202953316 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:15.969 8 NULL NULL 200.0 -400.0 -4.0 200.0 8.0 8 200.0 -9832802032 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:01.653 8 NULL NULL 200.0 -400.0 -4.0 200.0 8.0 8 200.0 -15661041184 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:51.063 58 NULL NULL 200.0 -400.0 -4.0 200.0 58.0 58 200.0 -25062091276 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:44.04 32 NULL NULL 200.0 -400.0 -4.0 200.0 32.0 32 200.0 -8229422560 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:11.08 -9 NULL NULL 200.0 -400.0 -4.0 200.0 -9.0 -9 200.0 -5470381665 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:03.446 -19 NULL NULL 200.0 -400.0 -4.0 200.0 -19.0 -19 200.0 10670477159 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:54.187 -45 NULL NULL 200.0 -400.0 -4.0 200.0 -45.0 -45 200.0 -63963827865 9763615.5639 -200.0 +-200.0 1969-12-31 16:00:16.178 -30 NULL NULL 200.0 -400.0 -4.0 200.0 -30.0 -30 200.0 47698035420 9763615.5639 -200.0 +-200.0 1969-12-31 15:59:50.618 
-60 NULL NULL 200.0 -400.0 -4.0 200.0 -60.0 -60 200.0 -55670852400 9763615.5639 -200.0 +NULL 1969-12-31 16:00:08.451 -51 true nOF31ehjY7ULCHMf NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false uUTO41xk6VyqYPh NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false 8AqHq NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true S0LP25K12US3 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false 8Jvom23dkWvvqv81DY5Ub3 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false GlCK4Dw7uIb1bsY NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true 7bD30suWFdI4o5Jp6m NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false 121307nh6r0H31Mg NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false J8p4pS3A8G75Ct2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true 1Iry1n1c NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false vmD7YLtKX0c4y2uU NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true 4k1RqRL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true 3StDSaH7 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false 2M106hVFEhu NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false 74nRe6WYOO7MD7632BOS NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true YX250 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false WUQQRWTJ1wK1H4 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false G2P1ogIIyMgo6j2a27egS NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false 12yT2agBjx3yQ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false fkA37sOkxCp44hlIKV NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false rLL8VlwJ0P NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false 37p34Jc2nloL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false c23S6Ky4w7Ld21lAbB NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false 1M4eTm8OcOW2dAMV2V5slS1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true 3yeq763N NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false sU1VhRD0P3w47WU66 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true Cw412mnXhN1F NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false T0Gq3D4N50YY48AG8OQBqTU NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true ON30Mh8A8 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true 7LdfF1415i51qpmHQI NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false 8l433e5J6I0fj0PM NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false EXWsAOlGYtb053ExF6u5FLyb NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true q2bIHkxaKKv7uD NULL NULL NULL NULL NULL NULL NULL NULL NULL 
NULL +NULL 1969-12-31 16:00:08.451 -51 false JVCOfSTVb NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true M76D058tDDD25v3g NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false s038hX0U8 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false 4l6OX60y NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true jd4MshHSjPOuq1b2T NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false xgPW6tMwuNv67I0q2227 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false QRq4fxOau2jef55O5X1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false d3yQbTLvpGyi0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true Bb2AdwWmQOcwJhqF NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true OqM62X0G3j7XpBOTt70 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false tyt5Bwxxe NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false vgd8P8Ff1n NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 true j83cOtj22H5Aje7H3 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false QgA6r86x0JrfdHuM NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false d1N0u454kG87DN3o NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:08.451 -51 false J0VTT0R8t1JcxdoOO NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +12004.0 NULL NULL true a -12004.0 24008.0 17.0 -12004.0 NULL NULL -12004.0 NULL 9739207.5639 12004.0 +14468.0 NULL NULL true 3B3ubgg3B6a -14468.0 28936.0 28.0 -14468.0 NULL NULL -14468.0 NULL 9734279.5639 14468.0 +15601.0 1969-12-31 15:59:52.786 -1 NULL NULL -15601.0 31202.0 17.0 -15601.0 -1.0 -1 -15601.0 -672512361 9732013.5639 15601.0 +15601.0 1969-12-31 16:00:05.334 22 NULL NULL -15601.0 31202.0 17.0 -15601.0 22.0 22 -15601.0 -41268959688 9732013.5639 15601.0 +15601.0 1969-12-31 16:00:03.888 -23 NULL NULL -15601.0 31202.0 17.0 -15601.0 -23.0 -23 -15601.0 48400325149 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:56.481 10 NULL NULL -15601.0 31202.0 17.0 -15601.0 10.0 10 -15601.0 -12301158220 9732013.5639 15601.0 +15601.0 1969-12-31 16:00:05.83 -49 NULL NULL -15601.0 31202.0 17.0 -15601.0 -49.0 -49 -15601.0 -11115220466 9732013.5639 15601.0 +15601.0 1969-12-31 16:00:05.007 35 NULL NULL -15601.0 31202.0 17.0 -15601.0 35.0 35 -15601.0 74309762800 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:46.443 -43 NULL NULL -15601.0 31202.0 17.0 -15601.0 -43.0 -43 -15601.0 9618553900 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:44.115 -20 NULL NULL -15601.0 31202.0 17.0 -15601.0 -20.0 -20 -15601.0 5179862200 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:48.552 62 NULL NULL -15601.0 31202.0 17.0 -15601.0 62.0 62 -15601.0 -92267819432 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:46.33 24 NULL NULL -15601.0 31202.0 17.0 -15601.0 24.0 24 -15601.0 -41767499616 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:45.655 -23 NULL NULL -15601.0 31202.0 17.0 -15601.0 -23.0 -23 -15601.0 -14931660214 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:46.82 -46 NULL NULL -15601.0 31202.0 17.0 -15601.0 -46.0 -46 -15601.0 9610884144 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:57.729 30 NULL NULL -15601.0 31202.0 17.0 -15601.0 30.0 30 -15601.0 -59602621200 
9732013.5639 15601.0 +15601.0 1969-12-31 15:59:50.575 29 NULL NULL -15601.0 31202.0 17.0 -15601.0 29.0 29 -15601.0 -6410141150 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:46.258 -26 NULL NULL -15601.0 31202.0 17.0 -15601.0 -26.0 -26 -15601.0 -35132327672 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:54.384 -59 NULL NULL -15601.0 31202.0 17.0 -15601.0 -59.0 -59 -15601.0 -109154505771 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:47.436 -51 NULL NULL -15601.0 31202.0 17.0 -15601.0 -51.0 -51 -15601.0 -3541883598 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:48.71 -30 NULL NULL -15601.0 31202.0 17.0 -15601.0 -30.0 -30 -15601.0 -36655228350 9732013.5639 15601.0 +15601.0 1969-12-31 16:00:04.063 33 NULL NULL -15601.0 31202.0 17.0 -15601.0 33.0 33 -15601.0 -47936367534 9732013.5639 15601.0 +15601.0 1969-12-31 16:00:09.123 -14 NULL NULL -15601.0 31202.0 17.0 -15601.0 -14.0 -14 -15601.0 -14100538704 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:53.715 -44 NULL NULL -15601.0 31202.0 17.0 -15601.0 -44.0 -44 -15601.0 3342918304 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:58.704 -55 NULL NULL -15601.0 31202.0 17.0 -15601.0 -55.0 -55 -15601.0 73626727075 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:48.932 16 NULL NULL -15601.0 31202.0 17.0 -15601.0 16.0 16 -15601.0 NULL 9732013.5639 15601.0 +15601.0 1969-12-31 16:00:11.928 -32 NULL NULL -15601.0 31202.0 17.0 -15601.0 -32.0 -32 -15601.0 -54463594144 9732013.5639 15601.0 +15601.0 1969-12-31 16:00:02.401 30 NULL NULL -15601.0 31202.0 17.0 -15601.0 30.0 30 -15601.0 61004562030 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:51.153 -44 NULL NULL -15601.0 31202.0 17.0 -15601.0 -44.0 -44 -15601.0 17590906828 9732013.5639 15601.0 +15601.0 1969-12-31 16:00:14.175 -50 NULL NULL -15601.0 31202.0 17.0 -15601.0 -50.0 -50 -15601.0 -35817486300 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:54.253 -44 NULL NULL -15601.0 31202.0 17.0 -15601.0 -44.0 -44 -15601.0 -11984211184 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:54.524 52 NULL NULL -15601.0 31202.0 17.0 -15601.0 52.0 52 -15601.0 80665657592 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:52.778 4 NULL NULL -15601.0 31202.0 17.0 -15601.0 4.0 4 -15601.0 -3767539848 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:51.535 55 NULL NULL -15601.0 31202.0 17.0 -15601.0 55.0 55 -15601.0 -57431846615 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:51.645 10 NULL NULL -15601.0 31202.0 17.0 -15601.0 10.0 10 -15601.0 -10432519820 9732013.5639 15601.0 +15601.0 1969-12-31 15:59:53.279 -11 NULL NULL -15601.0 31202.0 17.0 -15601.0 -11.0 -11 -15601.0 -19558985941 9732013.5639 15601.0 +NULL 1969-12-31 16:00:15.892 8 false W4TEt52sKL0ndx4jeCahICDW NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true p3DvmcsqP6xMf NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true bc014i7354F36p NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false 0m8aHX5yF5muTQW NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false 8JNVrH3Lasa826 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true woiNv162mnSJ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true x6WK1U14M7IlWw NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true poE6hx8xV36vG NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false iStQPx6j8SvMc NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 
8 true 06Q47xVf1d5JSdb NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false e13dNAo71UXm4Yt1u NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false s3WL6smnb7 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true q6iS3txi22Rj22Ks4Dd NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false iEb04t2x333EF5wHoKRs6oKB NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false BwXBC7rU57 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false 31rhe NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false j2UTaANoWtpw2co6Nj3bR2UG NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true b NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true 07Hofhidd5ClnNx8jTl1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true pq2i0NL1cRlR3CpAj082 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true N334idEn4hyyO64 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false Xi7kOTT NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true 4A7p4HkPm01W0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true 8Fx0J88 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true Q0PCmMLk NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true i6G060 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false M3Vcm3o NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false eIyS41R32 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false 0siU5JLRoUBPi88Kenqg4 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false qI8k4Mf NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false 16qqkM5M66EMI3uWjWy NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false 1AQR8H78mO7jyb2PBF NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false fVgv88OvQR1BB7toX NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true 7GCfB5odqYDW1gq7iBWJ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true 1w7DPjq NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true 41PLN7aXgP57M4Rr3 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false uHkBp64 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true A30e7a8ia36g25YQc8xTXBgB NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true 7e6ntfBnB0m82i6k83 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true Pc18F2c6iW766Vd NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 false 4c2KT50dog5 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:15.892 8 true oibQ623k5v33kBUK8Q NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +8801.0 NULL NULL false 
5712We1FSa -8801.0 17602.0 13.0 -8801.0 NULL NULL -8801.0 NULL 9745613.5639 8801.0 +14460.0 NULL NULL true hQAra -14460.0 28920.0 12.0 -14460.0 NULL NULL -14460.0 NULL 9734295.5639 14460.0 +-7196.0 1969-12-31 15:59:54.133 11 NULL NULL 7196.0 -14392.0 -4.0 7196.0 11.0 11 7196.0 13012660188 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:57.86 -52 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -52.0 -52 7196.0 1368083028 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:11.36 -53 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -53.0 -53 7196.0 71962864647 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:00.381 -2 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -2.0 -2 7196.0 -1206817104 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:04.233 26 NULL NULL 7196.0 -14392.0 -4.0 7196.0 26.0 26 7196.0 -15783340898 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:55.667 56 NULL NULL 7196.0 -14392.0 -4.0 7196.0 56.0 56 7196.0 -120146991496 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:13.231 53 NULL NULL 7196.0 -14392.0 -4.0 7196.0 53.0 53 7196.0 -36144071012 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:45.518 47 NULL NULL 7196.0 -14392.0 -4.0 7196.0 47.0 47 7196.0 81143089746 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:50.265 3 NULL NULL 7196.0 -14392.0 -4.0 7196.0 3.0 3 7196.0 -5006530458 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:13.787 24 NULL NULL 7196.0 -14392.0 -4.0 7196.0 24.0 24 7196.0 38316668352 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:51.009 -49 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -49.0 -49 7196.0 NULL 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:51.561 -35 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -35.0 -35 7196.0 -70617762705 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:06.848 -18 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -18.0 -18 7196.0 1982664288 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:52.969 -27 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -27.0 -27 7196.0 8967759183 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:13.816 31 NULL NULL 7196.0 -14392.0 -4.0 7196.0 31.0 31 7196.0 -56470642871 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:57.011 42 NULL NULL 7196.0 -14392.0 -4.0 7196.0 42.0 42 7196.0 -23099469372 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:53.686 -39 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -39.0 -39 7196.0 45315380682 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:08.418 41 NULL NULL 7196.0 -14392.0 -4.0 7196.0 41.0 41 7196.0 32453141435 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:44.292 -23 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -23.0 -23 7196.0 46033183457 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:08.373 60 NULL NULL 7196.0 -14392.0 -4.0 7196.0 60.0 60 7196.0 -119905930860 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:49.326 39 NULL NULL 7196.0 -14392.0 -4.0 7196.0 39.0 39 7196.0 -30362271264 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:48.929 -12 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -12.0 -12 7196.0 14774939436 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:01.22 -62 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -62.0 -62 7196.0 -103567870178 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:54.776 10 NULL NULL 7196.0 -14392.0 -4.0 7196.0 10.0 10 7196.0 -6713016290 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:15.923 15 NULL NULL 7196.0 -14392.0 -4.0 7196.0 15.0 15 7196.0 6481300020 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:55.492 14 NULL NULL 7196.0 -14392.0 -4.0 7196.0 14.0 14 7196.0 23828505764 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:11.703 -29 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -29.0 -29 7196.0 10213273940 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:53.145 -24 NULL 
NULL 7196.0 -14392.0 -4.0 7196.0 -24.0 -24 7196.0 -19898664000 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:10.915 39 NULL NULL 7196.0 -14392.0 -4.0 7196.0 39.0 39 7196.0 -14094881658 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:15.188 -21 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -21.0 -21 7196.0 38372734386 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:56.135 -17 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -17.0 -17 7196.0 8662474406 9777607.5639 -7196.0 +-7196.0 1969-12-31 15:59:50.462 56 NULL NULL 7196.0 -14392.0 -4.0 7196.0 56.0 56 7196.0 -118616357552 9777607.5639 -7196.0 +-7196.0 1969-12-31 16:00:01.088 -16 NULL NULL 7196.0 -14392.0 -4.0 7196.0 -16.0 -16 7196.0 -7507617424 9777607.5639 -7196.0 +NULL 1969-12-31 16:00:02.351 11 true 70070HP7Kb8Lrj NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true s456h8r2b0jAt4Ni3qopHCxS NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false woeLEb NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false gk0kJenBW237uQoxGBx36 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false 0rtl1C NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true 1V07gCB41Psbr5xtLiK4E NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true a3EhVU6Wuy7ycJ7wY7h2gv NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true F8iVJQQdC6O4 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true 8s0kR1e4QVV7QO NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true kfUgQ2uGN8a NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false 6a421YV NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true 16L335OgyOKH4565 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false G2s1ly NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true Nmt6E360X6dpX58CR2 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true 8tVuiCkFtGW5KX NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true ySAfuiG2vJNn5TR5 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true gjsL355dId0aH1mj0yGky1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true 6t557nSSrg1s0Q NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true N2Jfon7dyCN2Pmm1JA NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true 3Fhv1QY7Y776eQ38a NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false SN5NB5L3gpe2RtR2w50sNAd NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false M7xB374ixGAp NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true II1600yobW7p NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false y605nF0K3mMoM75j NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false w6173j NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false H5alUwndRKm NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false ve4Pgoehe6vhmYVLpP NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true 
yc2pX4jTI0xKh5xTys NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true 6AmfdSoTPmVvXdgM8CP20sx NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true cd6Xc861fDCGe NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true qlspyY30jeWkAcB1ptQ4co0 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false l3r8T4QgT63 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false K11m3K43m5XFX40RJm1q NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true 5NM44RohO4r6 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false Bgk2cxNJk7f4rMmW38Dl3S1 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true LP5AMypx5 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 true Bsi3VIb NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false V2NEmm6d0kLFGa5s01k NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false g552y0x1B4n NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false eicMhR0nJt12OH7IO2651bO NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +NULL 1969-12-31 16:00:02.351 11 false 1j3rth56N41X17c1S NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +11619.0 NULL NULL false lJ63qx87BLmdMfa -11619.0 23238.0 6.0 -11619.0 NULL NULL -11619.0 NULL 9739977.5639 11619.0 +12520.0 NULL NULL false S7UM6KgdxTofi6rwXBFa2a -12520.0 25040.0 26.0 -12520.0 NULL NULL -12520.0 NULL 9738175.5639 12520.0 +13167.0 NULL NULL true 4gBPJa -13167.0 26334.0 0.0 -13167.0 NULL NULL -13167.0 NULL 9736881.5639 13167.0 diff --git a/ql/src/test/results/clientpositive/tez/vectorization_11.q.out b/ql/src/test/results/clientpositive/tez/vectorization_11.q.out new file mode 100644 index 0000000..078bbe8 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorization_11.q.out @@ -0,0 +1,76 @@ +PREHOOK: query: SELECT cstring1, + cboolean1, + cdouble, + ctimestamp1, + (-3728 * csmallint), + (cdouble - 9763215.5639), + (-(cdouble)), + ((-(cdouble)) + 6981), + (cdouble * -5638.15) +FROM alltypesorc +WHERE ((cstring2 = cstring1) + OR ((ctimestamp1 IS NULL) + AND (cstring1 LIKE '%a'))) +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT cstring1, + cboolean1, + cdouble, + ctimestamp1, + (-3728 * csmallint), + (cdouble - 9763215.5639), + (-(cdouble)), + ((-(cdouble)) + 6981), + (cdouble * -5638.15) +FROM alltypesorc +WHERE ((cstring2 = cstring1) + OR ((ctimestamp1 IS NULL) + AND (cstring1 LIKE '%a'))) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +JRN4nLo30dv0bRtsrJa true -4319.0 NULL 16101232 -9767534.5639 4319.0 11300.0 2.4351169849999998E7 +Sd8C6q6L7l72qsa false 5306.0 NULL -19780768 -9757909.5639 -5306.0 1675.0 -2.99160239E7 +0AP3HERf5Ra true 5045.0 NULL -18807760 -9758170.5639 -5045.0 1936.0 -2.844446675E7 +a true 12004.0 NULL -44750912 -9751211.5639 -12004.0 -5023.0 -6.76803526E7 +oTh026tl2Ena false -11198.0 NULL 41746144 -9774413.5639 11198.0 18179.0 6.3136003699999996E7 +1MJ884f1w6B38WBeya false -2575.0 NULL 9599600 -9765790.5639 2575.0 9556.0 1.4518236249999998E7 +0MPx71oMa false 6644.0 NULL -24768832 -9756571.5639 -6644.0 337.0 -3.7459868599999994E7 +067wD7F8YQ8h32jPa true -16012.0 NULL 59692736 -9779227.5639 16012.0 22993.0 
9.02780578E7 +5xaNVvLa true 2315.0 NULL -8630320 -9760900.5639 -2315.0 4666.0 -1.305231725E7 +3B3ubgg3B6a true 14468.0 NULL -53936704 -9748747.5639 -14468.0 -7487.0 -8.157275419999999E7 +G5n81R5jjsG5Gp58vqNa false -3597.0 NULL 13409616 -9766812.5639 3597.0 10578.0 2.0280425549999997E7 +Vb8ub0i0Maa true -9883.0 NULL 36843824 -9773098.5639 9883.0 16864.0 5.5721836449999996E7 +QTTWGUR2P2b08Dn62ea false -16086.0 NULL 59968608 -9779301.5639 16086.0 23067.0 9.069528089999999E7 +60S63VPytWwf5Hu6j75cHa false -4739.0 NULL 17666992 -9767954.5639 4739.0 11720.0 2.6719192849999998E7 +5ctB5Don6vvjSc6a false -1786.0 NULL 6658208 -9765001.5639 1786.0 8767.0 1.0069735899999999E7 +5712We1FSa false 8801.0 NULL -32810128 -9754414.5639 -8801.0 -1820.0 -4.962135815E7 +7C1L24VM7Ya true 4122.0 NULL -15366816 -9759093.5639 -4122.0 2859.0 -2.3240454299999997E7 +FWCW47mXs2a true -6839.0 NULL 25495792 -9770054.5639 6839.0 13820.0 3.8559307849999994E7 +47x5248dXuiqta true -12888.0 NULL 48046464 -9776103.5639 12888.0 19869.0 7.266447719999999E7 +a false 3350.0 NULL -12488800 -9759865.5639 -3350.0 3631.0 -1.88878025E7 +w62rRn0DnCSWJ1ht6qWa false -5638.15 NULL 958096 -9768853.7139 5638.15 12619.15 3.1788735422499996E7 +hQAra true 14460.0 NULL -53906880 -9748755.5639 -14460.0 -7479.0 -8.1527649E7 +f3oGa8ByjMs5eo7462S84Aa false 4278.0 NULL -15948384 -9758937.5639 -4278.0 2703.0 -2.41200057E7 +LAFo0rFpPj1aW8Js4Scpa true 2719.0 NULL -10136432 -9760496.5639 -2719.0 4262.0 -1.533012985E7 +055VA1s2XC7q70aD8S0PLpa true -12485.0 NULL 46544080 -9775700.5639 12485.0 19466.0 7.039230275E7 +iS4P5128HY44wa false 3890.0 NULL -14501920 -9759325.5639 -3890.0 3091.0 -2.19324035E7 +lJ63qx87BLmdMfa false 11619.0 NULL -43315632 -9751596.5639 -11619.0 -4638.0 -6.5509664849999994E7 +a true -2944.0 NULL 10975232 -9766159.5639 2944.0 9925.0 1.65987136E7 +bBAKio7bAmQq7vIlsc8H14a true 1949.0 NULL -7265872 -9761266.5639 -1949.0 5032.0 -1.098875435E7 +L057p1HPpJsmA3a true -9542.0 NULL 35572576 -9772757.5639 9542.0 16523.0 5.37992273E7 +S7UM6KgdxTofi6rwXBFa2a false 12520.0 NULL -46674560 -9750695.5639 -12520.0 -5539.0 -7.0589638E7 +4gBPJa true 13167.0 NULL -49086576 -9750048.5639 -13167.0 -6186.0 -7.423752105E7 +Tt484a true 754.0 NULL -2810912 -9762461.5639 -754.0 6227.0 -4251165.1 +PMoJ1NvQoAm5a true 539.0 NULL -2009392 -9762676.5639 -539.0 6442.0 -3038962.8499999996 +kro4Xu41bB7hiFa false -3277.0 NULL 12216656 -9766492.5639 3277.0 10258.0 1.8476217549999997E7 +a true 4991.0 NULL -18606448 -9758224.5639 -4991.0 1990.0 -2.814000665E7 +G7Ve8Px6a7J0DafBodF8JMma false -1291.0 NULL 4812848 -9764506.5639 1291.0 8272.0 7278851.649999999 +OHG2wWD83Ba false 6914.0 NULL -25775392 -9756301.5639 -6914.0 67.0 -3.8982169099999994E7 +DUSKf88a false 6764.0 NULL -25216192 -9756451.5639 -6764.0 217.0 -3.8136446599999994E7 +hnq6hkAfna true 5926.0 NULL -22092128 -9757289.5639 -5926.0 1055.0 -3.34116769E7 +K7tGy146ydka false -1236.0 NULL 4607808 -9764451.5639 1236.0 8217.0 6968753.399999999 +eNsh5tYa false NULL NULL NULL NULL NULL NULL NULL +a true -5905.0 NULL 22013840 -9769120.5639 5905.0 12886.0 3.3293275749999996E7 +dun2EEixI701imr3d6a true -8352.0 NULL 31136256 -9771567.5639 8352.0 15333.0 4.70898288E7 diff --git a/ql/src/test/results/clientpositive/tez/vectorization_16.q.out b/ql/src/test/results/clientpositive/tez/vectorization_16.q.out new file mode 100644 index 0000000..5d18b83 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorization_16.q.out @@ -0,0 +1,666 @@ +PREHOOK: query: EXPLAIN +SELECT cstring1, + cdouble, + ctimestamp1, + (cdouble - 
9763215.5639), + (-((cdouble - 9763215.5639))), + COUNT(cdouble), + STDDEV_SAMP(cdouble), + (-(STDDEV_SAMP(cdouble))), + (STDDEV_SAMP(cdouble) * COUNT(cdouble)), + MIN(cdouble), + (9763215.5639 / cdouble), + (COUNT(cdouble) / -1.389), + STDDEV_SAMP(cdouble) +FROM alltypesorc +WHERE ((cstring2 LIKE '%b%') + AND ((cdouble >= -1.389) + OR (cstring1 < 'a'))) +GROUP BY cstring1, cdouble, ctimestamp1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT cstring1, + cdouble, + ctimestamp1, + (cdouble - 9763215.5639), + (-((cdouble - 9763215.5639))), + COUNT(cdouble), + STDDEV_SAMP(cdouble), + (-(STDDEV_SAMP(cdouble))), + (STDDEV_SAMP(cdouble) * COUNT(cdouble)), + MIN(cdouble), + (9763215.5639 / cdouble), + (COUNT(cdouble) / -1.389), + STDDEV_SAMP(cdouble) +FROM alltypesorc +WHERE ((cstring2 LIKE '%b%') + AND ((cdouble >= -1.389) + OR (cstring1 < 'a'))) +GROUP BY cstring1, cdouble, ctimestamp1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((cstring2 like '%b%') and ((cdouble >= -1.389) or (cstring1 < 'a'))) (type: boolean) + Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp) + outputColumnNames: cstring1, cdouble, ctimestamp1 + Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(cdouble), stddev_samp(cdouble), min(cdouble) + keys: cstring1 (type: string), cdouble (type: double), ctimestamp1 (type: timestamp) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp) + Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE + value expressions: _col3 (type: bigint), _col4 (type: struct), _col5 (type: double) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0), stddev_samp(VALUE._col1), min(VALUE._col2) + keys: KEY._col0 (type: string), KEY._col1 (type: double), KEY._col2 (type: timestamp) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: double), _col2 (type: timestamp), (_col1 - 9763215.5639) (type: double), (- (_col1 - 9763215.5639)) (type: double), _col3 (type: bigint), _col4 (type: double), (- _col4) (type: double), (_col4 * _col3) (type: double), _col5 (type: double), (9763215.5639 / _col1) (type: double), (_col3 / -1.389) (type: double), _col4 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: 
false + Statistics: Num rows: 2048 Data size: 62872 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT cstring1, + cdouble, + ctimestamp1, + (cdouble - 9763215.5639), + (-((cdouble - 9763215.5639))), + COUNT(cdouble), + STDDEV_SAMP(cdouble), + (-(STDDEV_SAMP(cdouble))), + (STDDEV_SAMP(cdouble) * COUNT(cdouble)), + MIN(cdouble), + (9763215.5639 / cdouble), + (COUNT(cdouble) / -1.389), + STDDEV_SAMP(cdouble) +FROM alltypesorc +WHERE ((cstring2 LIKE '%b%') + AND ((cdouble >= -1.389) + OR (cstring1 < 'a'))) +GROUP BY cstring1, cdouble, ctimestamp1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT cstring1, + cdouble, + ctimestamp1, + (cdouble - 9763215.5639), + (-((cdouble - 9763215.5639))), + COUNT(cdouble), + STDDEV_SAMP(cdouble), + (-(STDDEV_SAMP(cdouble))), + (STDDEV_SAMP(cdouble) * COUNT(cdouble)), + MIN(cdouble), + (9763215.5639 / cdouble), + (COUNT(cdouble) / -1.389), + STDDEV_SAMP(cdouble) +FROM alltypesorc +WHERE ((cstring2 LIKE '%b%') + AND ((cdouble >= -1.389) + OR (cstring1 < 'a'))) +GROUP BY cstring1, cdouble, ctimestamp1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +NULL 15601.0 1969-12-31 15:59:43.919 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:44.07 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:44.179 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:44.394 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:44.477 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:44.568 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:44.571 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:44.708 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:44.782 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:45.816 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:46.114 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:46.82 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:46.953 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:47.134 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:47.406 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:47.511 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 
625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:47.616 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:47.975 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:48.052 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:48.299 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:48.429 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:48.552 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:48.679 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:48.943 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:49.331 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:49.896 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:50.345 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:50.66 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:51.104 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:51.265 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:51.413 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:51.596 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:51.637 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:52.076 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:52.311 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:52.326 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:52.357 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:52.587 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:53.038 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:53.583 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:53.584 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:53.635 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:54.024 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 
+NULL 15601.0 1969-12-31 15:59:54.116 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:54.334 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:54.342 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:54.454 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:54.583 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:54.994 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:55.411 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:55.847 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:55.989 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:55.998 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:56.068 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:56.338 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:56.806 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:56.858 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:56.913 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:56.97 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:57.215 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:57.261 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:57.28 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:57.386 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:57.524 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:57.678 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:57.729 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:57.932 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:58.134 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:58.279 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:58.343 -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0 +NULL 15601.0 1969-12-31 15:59:58.752 
[~300 flattened golden-output rows omitted: repeated "NULL 15601.0 1969-12-31 16:00:xx.xxx -9747614.5639 9747614.5639 1 0.0 -0.0 0.0 15601.0 625.8070356964297 -0.7199424046076314 0.0" aggregate rows keyed by ctimestamp1, followed by per-cstring1 rows of the form "<cstring1> NULL 1969-12-31 16:00:xx.xxx NULL NULL 0 NULL NULL NULL NULL NULL -0.0 NULL"]
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_2.q.out b/ql/src/test/results/clientpositive/tez/vectorization_2.q.out
new file mode 100644
index 0000000..709a75f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vectorization_2.q.out
@@ -0,0 +1,49 @@
+PREHOOK: query: SELECT AVG(csmallint),
+       (AVG(csmallint) % -563),
+       (AVG(csmallint) + 762),
+       SUM(cfloat),
+       VAR_POP(cbigint),
+       (-(VAR_POP(cbigint))),
+       (SUM(cfloat) - AVG(csmallint)),
+       COUNT(*),
+       (-((SUM(cfloat) - AVG(csmallint)))),
+       (VAR_POP(cbigint) - 762),
+       MIN(ctinyint),
+       ((-(VAR_POP(cbigint))) + MIN(ctinyint)),
+       AVG(cdouble),
+       (((-(VAR_POP(cbigint))) + MIN(ctinyint)) - SUM(cfloat))
+FROM alltypesorc
+WHERE (((ctimestamp1 < ctimestamp2)
+       AND ((cstring2 LIKE 'b%')
+       AND (cfloat <= -5638.15)))
+       OR ((cdouble < ctinyint)
+       AND ((-10669 != ctimestamp2)
+       OR (359 > cint))))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT AVG(csmallint),
+       (AVG(csmallint) % -563),
+       (AVG(csmallint) + 762),
+       SUM(cfloat),
+       VAR_POP(cbigint),
+       (-(VAR_POP(cbigint))),
+       (SUM(cfloat) - AVG(csmallint)),
+       COUNT(*),
+       (-((SUM(cfloat) - AVG(csmallint)))),
+       (VAR_POP(cbigint) - 762),
+       MIN(ctinyint),
+       ((-(VAR_POP(cbigint))) + MIN(ctinyint)),
+       AVG(cdouble),
+       (((-(VAR_POP(cbigint))) + MIN(ctinyint)) - SUM(cfloat))
+FROM alltypesorc
+WHERE (((ctimestamp1 < ctimestamp2)
+       AND ((cstring2 LIKE 'b%')
+       AND (cfloat <= -5638.15)))
+       OR ((cdouble < ctinyint)
+       AND ((-10669 != ctimestamp2)
+       OR (359 > cint))))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-5646.467075892857 -16.467075892856883 -4884.467075892857 -2839.634998679161 1.49936299222378778E18 -1.49936299222378778E18 2806.832077213696 3584 -2806.832077213696 1.49936299222378701E18 -64 -1.49936299222378778E18 -5650.1297631138395 -1.49936299222378496E18
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_3.q.out b/ql/src/test/results/clientpositive/tez/vectorization_3.q.out
new file mode 100644
index 0000000..d797835
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vectorization_3.q.out
@@ -0,0 +1,55 @@
+WARNING: Comparing a bigint and a double may result in a loss of precision.
+WARNING: Comparing a bigint and a double may result in a loss of precision.
+PREHOOK: query: SELECT STDDEV_SAMP(csmallint),
+       (STDDEV_SAMP(csmallint) - 10.175),
+       STDDEV_POP(ctinyint),
+       (STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)),
+       (-(STDDEV_POP(ctinyint))),
+       (STDDEV_SAMP(csmallint) % 79.553),
+       (-((STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)))),
+       STDDEV_SAMP(cfloat),
+       (-(STDDEV_SAMP(csmallint))),
+       SUM(cfloat),
+       ((-((STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)))) / (STDDEV_SAMP(csmallint) - 10.175)),
+       (-((STDDEV_SAMP(csmallint) - 10.175))),
+       AVG(cint),
+       (-3728 - STDDEV_SAMP(csmallint)),
+       STDDEV_POP(cint),
+       (AVG(cint) / STDDEV_SAMP(cfloat))
+FROM alltypesorc
+WHERE (((cint <= cfloat)
+       AND ((79.553 != cbigint)
+       AND (ctimestamp2 = -29071)))
+       OR ((cbigint > cdouble)
+       AND ((79.553 <= csmallint)
+       AND (ctimestamp1 > ctimestamp2))))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT STDDEV_SAMP(csmallint),
+       (STDDEV_SAMP(csmallint) - 10.175),
+       STDDEV_POP(ctinyint),
+       (STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)),
+       (-(STDDEV_POP(ctinyint))),
+       (STDDEV_SAMP(csmallint) % 79.553),
+       (-((STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)))),
+       STDDEV_SAMP(cfloat),
+       (-(STDDEV_SAMP(csmallint))),
+       SUM(cfloat),
+       ((-((STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)))) / (STDDEV_SAMP(csmallint) - 10.175)),
+       (-((STDDEV_SAMP(csmallint) - 10.175))),
+       AVG(cint),
+       (-3728 - STDDEV_SAMP(csmallint)),
+       STDDEV_POP(cint),
+       (AVG(cint) / STDDEV_SAMP(cfloat))
+FROM alltypesorc
+WHERE (((cint <= cfloat)
+       AND ((79.553 != cbigint)
+       AND (ctimestamp2 = -29071)))
+       OR ((cbigint > cdouble)
+       AND ((79.553 <= csmallint)
+       AND (ctimestamp1 > ctimestamp2))))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+0.0 -10.175 34.287285216637066 -0.0 -34.287285216637066 0.0 0.0 34.34690095515641 -0.0 197.89499950408936 -0.0 10.175 NULL -3728.0 NULL NULL
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_4.q.out b/ql/src/test/results/clientpositive/tez/vectorization_4.q.out
new file mode 100644
index 0000000..0d6829f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vectorization_4.q.out
@@ -0,0 +1,49 @@
+PREHOOK: query: SELECT SUM(cint),
+       (SUM(cint) * -563),
+       (-3728 + SUM(cint)),
+       STDDEV_POP(cdouble),
+       (-(STDDEV_POP(cdouble))),
+       AVG(cdouble),
+       ((SUM(cint) * -563) % SUM(cint)),
+       (((SUM(cint) * -563) % SUM(cint)) / AVG(cdouble)),
+       VAR_POP(cdouble),
+       (-((((SUM(cint) * -563) % SUM(cint)) / AVG(cdouble)))),
+       ((-3728 + SUM(cint)) - (SUM(cint) * -563)),
+       MIN(ctinyint),
+       MIN(ctinyint),
+       (MIN(ctinyint) * (-((((SUM(cint) * -563) % SUM(cint)) / AVG(cdouble)))))
+FROM alltypesorc
+WHERE (((csmallint >= cint)
+       OR ((-89010 >= ctinyint)
+       AND (cdouble > 79.553)))
+       OR ((-563 != cbigint)
+       AND ((ctinyint != cbigint)
+       OR (-3728 >= cdouble))))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT SUM(cint),
+       (SUM(cint) * -563),
+       (-3728 + SUM(cint)),
+       STDDEV_POP(cdouble),
+       (-(STDDEV_POP(cdouble))),
+       AVG(cdouble),
+       ((SUM(cint) * -563) % SUM(cint)),
+       (((SUM(cint) * -563) % SUM(cint)) / AVG(cdouble)),
+       VAR_POP(cdouble),
+       (-((((SUM(cint) * -563) % SUM(cint)) / AVG(cdouble)))),
+       ((-3728 + SUM(cint)) - (SUM(cint) * -563)),
+       MIN(ctinyint),
+       MIN(ctinyint),
+       (MIN(ctinyint) * (-((((SUM(cint) * -563) % SUM(cint)) / AVG(cdouble)))))
+FROM alltypesorc
+WHERE (((csmallint >= cint)
+       OR ((-89010 >= ctinyint)
+       AND (cdouble > 79.553)))
+       OR ((-563 != cbigint)
+       AND ((ctinyint != cbigint)
+       OR (-3728 >= cdouble))))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+-493101012745 277615870175435 -493101016473 136727.7868296355 -136727.7868296355 2298.5515807767374 0 0.0 1.8694487691330246E10 -0.0 -278108971191908 -64 -64 0.0
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_5.q.out b/ql/src/test/results/clientpositive/tez/vectorization_5.q.out
new file mode 100644
index 0000000..914a626
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vectorization_5.q.out
@@ -0,0 +1,43 @@
+PREHOOK: query: SELECT MAX(csmallint),
+       (MAX(csmallint) * -75),
+       COUNT(*),
+       ((MAX(csmallint) * -75) / COUNT(*)),
+       (6981 * MAX(csmallint)),
+       MIN(csmallint),
+       (-(MIN(csmallint))),
+       (197 % ((MAX(csmallint) * -75) / COUNT(*))),
+       SUM(cint),
+       MAX(ctinyint),
+       (-(MAX(ctinyint))),
+       ((-(MAX(ctinyint))) + MAX(ctinyint))
+FROM alltypesorc
+WHERE (((cboolean2 IS NOT NULL)
+       AND (cstring1 LIKE '%b%'))
+       OR ((ctinyint = cdouble)
+       AND ((ctimestamp2 IS NOT NULL)
+       AND (cstring2 LIKE 'a'))))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT MAX(csmallint),
+       (MAX(csmallint) * -75),
+       COUNT(*),
+       ((MAX(csmallint) * -75) / COUNT(*)),
+       (6981 * MAX(csmallint)),
+       MIN(csmallint),
+       (-(MIN(csmallint))),
+       (197 % ((MAX(csmallint) * -75) / COUNT(*))),
+       SUM(cint),
+       MAX(ctinyint),
+       (-(MAX(ctinyint))),
+       ((-(MAX(ctinyint))) + MAX(ctinyint))
+FROM alltypesorc
+WHERE (((cboolean2 IS NOT NULL)
+       AND (cstring1 LIKE '%b%'))
+       OR ((ctinyint = cdouble)
+       AND ((ctimestamp2 IS NOT NULL)
+       AND (cstring2 LIKE 'a'))))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+16343 -1225725 1070 -1145.53738317757 114090483 -16307 16307 197.0 -26853917571 11 -11 0
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_6.q.out b/ql/src/test/results/clientpositive/tez/vectorization_6.q.out
new file mode 100644
index 0000000..d73274f
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vectorization_6.q.out
@@ -0,0 +1,1620 @@
+PREHOOK: query: SELECT cboolean1,
+       cfloat,
+       cstring1,
+       (988888 * csmallint),
+       (-(csmallint)),
+       (-(cfloat)),
+       (-26.28 / cfloat),
+       (cfloat * 359),
+       (cint % ctinyint),
+       (-(cdouble)),
+       (ctinyint - -75),
+       (762 * (cint % ctinyint))
+FROM alltypesorc
+WHERE ((ctinyint != 0)
+       AND ((((cboolean1 <= 0)
+       AND (cboolean2 >= cboolean1))
+       OR ((cbigint IS NOT NULL)
+       AND ((cstring2 LIKE '%a')
+       OR (cfloat <= -257))))))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cboolean1,
+       cfloat,
+       cstring1,
+       (988888 * csmallint),
+       (-(csmallint)),
+       (-(cfloat)),
+       (-26.28 / cfloat),
+       (cfloat * 359),
+       (cint % ctinyint),
+       (-(cdouble)),
+       (ctinyint - -75),
+       (762 * (cint % ctinyint))
+FROM alltypesorc
+WHERE ((ctinyint != 0)
+       AND ((((cboolean1 <= 0)
+       AND (cboolean2 >= cboolean1))
+       OR ((cbigint IS NOT NULL)
+       AND ((cstring2 LIKE '%a')
+       OR (cfloat <= -257))))))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
[~150 flattened golden-output rows omitted: "NULL <cfloat> NULL -197777600 200 ..." rows followed by per-cstring1 rows of the form "false -51.0 <cstring1> NULL NULL 51.0 0.5152941176470588 -18309.0 <cint % ctinyint> NULL 24 <762 * (cint % ctinyint)>"; the dump breaks off mid-row]
51.0 0.5152941176470588 -18309.0 12 NULL 24 9144 +false -51.0 P5X6554E66k NULL NULL 51.0 0.5152941176470588 -18309.0 46 NULL 24 35052 +false -51.0 ViqXS6s88N1yr14lj7I NULL NULL 51.0 0.5152941176470588 -18309.0 42 NULL 24 32004 +false -51.0 r2uhJH3 NULL NULL 51.0 0.5152941176470588 -18309.0 NULL NULL 24 NULL +false -51.0 WUQQRWTJ1wK1H4 NULL NULL 51.0 0.5152941176470588 -18309.0 16 NULL 24 12192 +false -51.0 gcGG4GVX7MxDB50GG7Mk NULL NULL 51.0 0.5152941176470588 -18309.0 33 NULL 24 25146 +false -51.0 6xm3103e5OE0C82nL3G NULL NULL 51.0 0.5152941176470588 -18309.0 -24 NULL 24 -18288 +false -51.0 w13G1635lvs30qJavVn NULL NULL 51.0 0.5152941176470588 -18309.0 9 NULL 24 6858 +false -51.0 G2P1ogIIyMgo6j2a27egS NULL NULL 51.0 0.5152941176470588 -18309.0 37 NULL 24 28194 +false -51.0 yW5M2tWxQ3NHs1 NULL NULL 51.0 0.5152941176470588 -18309.0 -34 NULL 24 -25908 +false -51.0 d8W5CN1kB6O6ovPhy1C3M NULL NULL 51.0 0.5152941176470588 -18309.0 47 NULL 24 35814 +false -51.0 WGPA8WlP5X NULL NULL 51.0 0.5152941176470588 -18309.0 13 NULL 24 9906 +false -51.0 I6FvRp84S2UGHl8orYl NULL NULL 51.0 0.5152941176470588 -18309.0 26 NULL 24 19812 +false -51.0 e8HP8Yt7uoB NULL NULL 51.0 0.5152941176470588 -18309.0 -8 NULL 24 -6096 +false -51.0 yif2md2VvY NULL NULL 51.0 0.5152941176470588 -18309.0 -23 NULL 24 -17526 +false -51.0 n2nf0ncE1Vj NULL NULL 51.0 0.5152941176470588 -18309.0 -41 NULL 24 -31242 +false -51.0 EAP1B57a5132algoul51 NULL NULL 51.0 0.5152941176470588 -18309.0 45 NULL 24 34290 +false -51.0 7BojnC3DIBmmGo8 NULL NULL 51.0 0.5152941176470588 -18309.0 -23 NULL 24 -17526 +false -51.0 IIX7QoB77864R6qOfLfhNJI4 NULL NULL 51.0 0.5152941176470588 -18309.0 3 NULL 24 2286 +false -51.0 o7H1gvt5G6 NULL NULL 51.0 0.5152941176470588 -18309.0 -39 NULL 24 -29718 +false -51.0 3wlj3rr4GuYKMG6QxL64jT NULL NULL 51.0 0.5152941176470588 -18309.0 -19 NULL 24 -14478 +false -51.0 TgS6dAlI2w4y NULL NULL 51.0 0.5152941176470588 -18309.0 -49 NULL 24 -37338 +false -51.0 KCaXaJvGKfj1tr NULL NULL 51.0 0.5152941176470588 -18309.0 -17 NULL 24 -12954 +false -51.0 HP824Y7lQ7bvAhrEx NULL NULL 51.0 0.5152941176470588 -18309.0 10 NULL 24 7620 +false -51.0 vA254Q0K7g NULL NULL 51.0 0.5152941176470588 -18309.0 -6 NULL 24 -4572 +false -51.0 HjA52J2d64r1fFmBITy1 NULL NULL 51.0 0.5152941176470588 -18309.0 -4 NULL 24 -3048 +false -51.0 34P6jvO10s66T30S NULL NULL 51.0 0.5152941176470588 -18309.0 50 NULL 24 38100 +false -51.0 12yT2agBjx3yQ NULL NULL 51.0 0.5152941176470588 -18309.0 -45 NULL 24 -34290 +false -51.0 30u668e NULL NULL 51.0 0.5152941176470588 -18309.0 32 NULL 24 24384 +false -51.0 Mn25o4t044QATs NULL NULL 51.0 0.5152941176470588 -18309.0 3 NULL 24 2286 +false -51.0 o78FOQh4Cb NULL NULL 51.0 0.5152941176470588 -18309.0 -40 NULL 24 -30480 +false -51.0 p77RYLpx2u NULL NULL 51.0 0.5152941176470588 -18309.0 0 NULL 24 0 +false -51.0 2X0XRt20B70F7B NULL NULL 51.0 0.5152941176470588 -18309.0 -20 NULL 24 -15240 +false -51.0 UA0H368kj NULL NULL 51.0 0.5152941176470588 -18309.0 -1 NULL 24 -762 +false -51.0 Yssb82rdfylDv4K NULL NULL 51.0 0.5152941176470588 -18309.0 NULL NULL 24 NULL +false -51.0 i1P3Wlat5EnBugL24oS4I3 NULL NULL 51.0 0.5152941176470588 -18309.0 -19 NULL 24 -14478 +false -51.0 fkA37sOkxCp44hlIKV NULL NULL 51.0 0.5152941176470588 -18309.0 34 NULL 24 25908 +false -51.0 rLL8VlwJ0P NULL NULL 51.0 0.5152941176470588 -18309.0 33 NULL 24 25146 +false -51.0 71027fBh8760gbL7aF4K NULL NULL 51.0 0.5152941176470588 -18309.0 8 NULL 24 6096 +false -51.0 PnD8l5 NULL NULL 51.0 0.5152941176470588 -18309.0 -37 NULL 24 -28194 +false -51.0 37p34Jc2nloL NULL 
NULL 51.0 0.5152941176470588 -18309.0 14 NULL 24 10668 +false -51.0 c23S6Ky4w7Ld21lAbB NULL NULL 51.0 0.5152941176470588 -18309.0 -29 NULL 24 -22098 +false -51.0 Wp8cr NULL NULL 51.0 0.5152941176470588 -18309.0 22 NULL 24 16764 +false -51.0 2g07108CQP0nN6tb NULL NULL 51.0 0.5152941176470588 -18309.0 -15 NULL 24 -11430 +false -51.0 60fNYu4mIaX7cI4y NULL NULL 51.0 0.5152941176470588 -18309.0 9 NULL 24 6858 +false -51.0 L0MMUTo8C5rj NULL NULL 51.0 0.5152941176470588 -18309.0 -29 NULL 24 -22098 +false -51.0 Ytgl8 NULL NULL 51.0 0.5152941176470588 -18309.0 30 NULL 24 22860 +false -51.0 ss NULL NULL 51.0 0.5152941176470588 -18309.0 -43 NULL 24 -32766 +false -51.0 gfML7L7et NULL NULL 51.0 0.5152941176470588 -18309.0 -50 NULL 24 -38100 +false -51.0 0OHV13 NULL NULL 51.0 0.5152941176470588 -18309.0 -3 NULL 24 -2286 +true -51.0 xjk22HQH0F0E161 NULL NULL 51.0 0.5152941176470588 -18309.0 -34 NULL 24 -25908 +false -51.0 1M4eTm8OcOW2dAMV2V5slS1 NULL NULL 51.0 0.5152941176470588 -18309.0 -37 NULL 24 -28194 +false -51.0 j1lyplu58dBa NULL NULL 51.0 0.5152941176470588 -18309.0 -26 NULL 24 -19812 +false -51.0 ah6jo34tl NULL NULL 51.0 0.5152941176470588 -18309.0 15 NULL 24 11430 +false -51.0 fbR231f NULL NULL 51.0 0.5152941176470588 -18309.0 -13 NULL 24 -9906 +false -51.0 e6F51mDOrN481rfhqk67lF40 NULL NULL 51.0 0.5152941176470588 -18309.0 -36 NULL 24 -27432 +false -51.0 lBoQXomNtF2131ymAFCB NULL NULL 51.0 0.5152941176470588 -18309.0 -16 NULL 24 -12192 +false -51.0 84L7MdH7 NULL NULL 51.0 0.5152941176470588 -18309.0 28 NULL 24 21336 +false -51.0 ugwHoBG4yXt5uEB NULL NULL 51.0 0.5152941176470588 -18309.0 -42 NULL 24 -32004 +false -51.0 3HhL08q56583 NULL NULL 51.0 0.5152941176470588 -18309.0 20 NULL 24 15240 +false -51.0 L4nk83x6pU NULL NULL 51.0 0.5152941176470588 -18309.0 3 NULL 24 2286 +false -51.0 k7i5RkMq88H0s NULL NULL 51.0 0.5152941176470588 -18309.0 -10 NULL 24 -7620 +false -51.0 61shR2LjQ NULL NULL 51.0 0.5152941176470588 -18309.0 -10 NULL 24 -7620 +false -51.0 W4MsK1d70i NULL NULL 51.0 0.5152941176470588 -18309.0 38 NULL 24 28956 +false -51.0 V04OvF27208o NULL NULL 51.0 0.5152941176470588 -18309.0 30 NULL 24 22860 +false -51.0 QOwp866GD0E0g3nwq NULL NULL 51.0 0.5152941176470588 -18309.0 5 NULL 24 3810 +false -51.0 sU1VhRD0P3w47WU66 NULL NULL 51.0 0.5152941176470588 -18309.0 8 NULL 24 6096 +false -51.0 DqpcjoX3m2h4hj4721T2M NULL NULL 51.0 0.5152941176470588 -18309.0 -40 NULL 24 -30480 +false -51.0 RY01bhu1p0G NULL NULL 51.0 0.5152941176470588 -18309.0 12 NULL 24 9144 +false -51.0 O656pe22AVUYD1OG8O4 NULL NULL 51.0 0.5152941176470588 -18309.0 35 NULL 24 26670 +false -51.0 nF0c6J04lo3lD0GhK8b7n3g NULL NULL 51.0 0.5152941176470588 -18309.0 0 NULL 24 0 +false -51.0 4MUYUYLAD7d0lk70NJjc6LB6 NULL NULL 51.0 0.5152941176470588 -18309.0 -16 NULL 24 -12192 +false -51.0 nSa8Lur3OP NULL NULL 51.0 0.5152941176470588 -18309.0 -6 NULL 24 -4572 +false -51.0 IifFS03pnGO NULL NULL 51.0 0.5152941176470588 -18309.0 -45 NULL 24 -34290 +false -51.0 G86cmDjPo3 NULL NULL 51.0 0.5152941176470588 -18309.0 -26 NULL 24 -19812 +false -51.0 l1Syw NULL NULL 51.0 0.5152941176470588 -18309.0 22 NULL 24 16764 +false -51.0 2yK4Bx76O NULL NULL 51.0 0.5152941176470588 -18309.0 25 NULL 24 19050 +false -51.0 JH051GV4O3FyM7 NULL NULL 51.0 0.5152941176470588 -18309.0 50 NULL 24 38100 +false -51.0 N5sqt2k NULL NULL 51.0 0.5152941176470588 -18309.0 -23 NULL 24 -17526 +false -51.0 vG0u7vdbry6JR4K4B743G3 NULL NULL 51.0 0.5152941176470588 -18309.0 -4 NULL 24 -3048 +false -51.0 4fSnp6 NULL NULL 51.0 0.5152941176470588 -18309.0 43 NULL 24 32766 
+false -51.0 R8EqThU NULL NULL 51.0 0.5152941176470588 -18309.0 -15 NULL 24 -11430 +false -51.0 tK61Btt3Vqln1aL8R NULL NULL 51.0 0.5152941176470588 -18309.0 25 NULL 24 19050 +false -51.0 54T2y NULL NULL 51.0 0.5152941176470588 -18309.0 4 NULL 24 3048 +false -51.0 nbcHJDu3 NULL NULL 51.0 0.5152941176470588 -18309.0 -12 NULL 24 -9144 +false -51.0 UfUD41M7m NULL NULL 51.0 0.5152941176470588 -18309.0 50 NULL 24 38100 +false -51.0 o4lvY20511w0EOX3P3I82p63 NULL NULL 51.0 0.5152941176470588 -18309.0 45 NULL 24 34290 +false -51.0 IXMkdqJHU46dVte76I3Cy36m NULL NULL 51.0 0.5152941176470588 -18309.0 39 NULL 24 29718 +false -51.0 FO81NX2MQ1Tv2 NULL NULL 51.0 0.5152941176470588 -18309.0 -36 NULL 24 -27432 +false -51.0 p34e30llmRd014J10sp NULL NULL 51.0 0.5152941176470588 -18309.0 25 NULL 24 19050 +false -51.0 xOjXs4YxT7sGOtEDP3l8HBN6 NULL NULL 51.0 0.5152941176470588 -18309.0 49 NULL 24 37338 +false -51.0 XeI6xQ2v1E NULL NULL 51.0 0.5152941176470588 -18309.0 -9 NULL 24 -6858 +false -51.0 Osyki0P18kNjc2k5 NULL NULL 51.0 0.5152941176470588 -18309.0 36 NULL 24 27432 +false -51.0 lo8y7 NULL NULL 51.0 0.5152941176470588 -18309.0 31 NULL 24 23622 +false -51.0 bU42b017V0K1G5v1L3B NULL NULL 51.0 0.5152941176470588 -18309.0 -39 NULL 24 -29718 +false -51.0 f6WR6jF NULL NULL 51.0 0.5152941176470588 -18309.0 -12 NULL 24 -9144 +false -51.0 T0Gq3D4N50YY48AG8OQBqTU NULL NULL 51.0 0.5152941176470588 -18309.0 32 NULL 24 24384 +false -51.0 ac38VdOhD4a0 NULL NULL 51.0 0.5152941176470588 -18309.0 45 NULL 24 34290 +false -51.0 R20lxgp NULL NULL 51.0 0.5152941176470588 -18309.0 42 NULL 24 32004 +false -51.0 38XES7ME0108oTOlH1I7BiWn NULL NULL 51.0 0.5152941176470588 -18309.0 -30 NULL 24 -22860 +false -51.0 5h04mA3qHKIDx05St0NNx NULL NULL 51.0 0.5152941176470588 -18309.0 34 NULL 24 25908 +false -51.0 QS5W14A NULL NULL 51.0 0.5152941176470588 -18309.0 27 NULL 24 20574 +false -51.0 2MCek73Rwx NULL NULL 51.0 0.5152941176470588 -18309.0 18 NULL 24 13716 +false -51.0 85cpPHm5B0GD NULL NULL 51.0 0.5152941176470588 -18309.0 -24 NULL 24 -18288 +false -51.0 1t2c87D721uxcFhn2 NULL NULL 51.0 0.5152941176470588 -18309.0 -50 NULL 24 -38100 +false -51.0 2x14G717LqcPA7Ic5 NULL NULL 51.0 0.5152941176470588 -18309.0 NULL NULL 24 NULL +false -51.0 4Cf7gWmeh3Gw3bHx50iT2 NULL NULL 51.0 0.5152941176470588 -18309.0 33 NULL 24 25146 +false -51.0 KJmChr2CEaA NULL NULL 51.0 0.5152941176470588 -18309.0 -1 NULL 24 -762 +false -51.0 886wwGvXf6 NULL NULL 51.0 0.5152941176470588 -18309.0 -47 NULL 24 -35814 +false -51.0 1R480AiLgVaTEIcn3hUy8X NULL NULL 51.0 0.5152941176470588 -18309.0 27 NULL 24 20574 +false -51.0 8Pa8a8MJ24 NULL NULL 51.0 0.5152941176470588 -18309.0 20 NULL 24 15240 +false -51.0 thN7LFe7EQ5A74m3s0 NULL NULL 51.0 0.5152941176470588 -18309.0 6 NULL 24 4572 +false -51.0 ktJI200FR0TY4Oq NULL NULL 51.0 0.5152941176470588 -18309.0 -10 NULL 24 -7620 +false -51.0 8l433e5J6I0fj0PM NULL NULL 51.0 0.5152941176470588 -18309.0 -50 NULL 24 -38100 +false -51.0 b4ntuTq8cuj0E66Gakn NULL NULL 51.0 0.5152941176470588 -18309.0 -33 NULL 24 -25146 +false -51.0 MFH46gf1UMw2xqJS6VO820 NULL NULL 51.0 0.5152941176470588 -18309.0 24 NULL 24 18288 +false -51.0 451H003P8UYu2 NULL NULL 51.0 0.5152941176470588 -18309.0 19 NULL 24 14478 +false -51.0 pJ8yNFwgS57SUhSORhpcu NULL NULL 51.0 0.5152941176470588 -18309.0 50 NULL 24 38100 +false -51.0 CqdMb86r52TC3NgM187 NULL NULL 51.0 0.5152941176470588 -18309.0 -35 NULL 24 -26670 +false -51.0 FdnoO3o3TWb NULL NULL 51.0 0.5152941176470588 -18309.0 -28 NULL 24 -21336 +false -51.0 WnN1oFEwhY4Heri3J7Jp8St NULL NULL 51.0 
0.5152941176470588 -18309.0 -39 NULL 24 -29718 +false -51.0 678iebWrL34TlW1 NULL NULL 51.0 0.5152941176470588 -18309.0 -18 NULL 24 -13716 +false -51.0 V5O0Paqve81yx8E223UpK17 NULL NULL 51.0 0.5152941176470588 -18309.0 2 NULL 24 1524 +false -51.0 OyQm637Y8T5223y1Ha20q70G NULL NULL 51.0 0.5152941176470588 -18309.0 28 NULL 24 21336 +false -51.0 4RpFMC366k71GL1j5Xd5 NULL NULL 51.0 0.5152941176470588 -18309.0 25 NULL 24 19050 +false -51.0 2dU734cvN0P2k65CE NULL NULL 51.0 0.5152941176470588 -18309.0 -14 NULL 24 -10668 +false -51.0 iD4A3pEIP5pkv3 NULL NULL 51.0 0.5152941176470588 -18309.0 -35 NULL 24 -26670 +false -51.0 B350G70tUHdR4F5331F NULL NULL 51.0 0.5152941176470588 -18309.0 11 NULL 24 8382 +false -51.0 5BkJb NULL NULL 51.0 0.5152941176470588 -18309.0 -45 NULL 24 -34290 +false -51.0 rQHT5hx NULL NULL 51.0 0.5152941176470588 -18309.0 -43 NULL 24 -32766 +false -51.0 LSGQPxLff8bpk NULL NULL 51.0 0.5152941176470588 -18309.0 -27 NULL 24 -20574 +false -51.0 f448c4T81BR NULL NULL 51.0 0.5152941176470588 -18309.0 -17 NULL 24 -12954 +false -51.0 0YAn3Qyo NULL NULL 51.0 0.5152941176470588 -18309.0 3 NULL 24 2286 +false -51.0 Ej05nrdc8CVXYu1Axy6W NULL NULL 51.0 0.5152941176470588 -18309.0 -28 NULL 24 -21336 +false -51.0 6Dnq5hvbkk NULL NULL 51.0 0.5152941176470588 -18309.0 -23 NULL 24 -17526 +false -51.0 8Y7yHw NULL NULL 51.0 0.5152941176470588 -18309.0 13 NULL 24 9906 +false -51.0 xA37f0CS8837b3uDhW7IJV0 NULL NULL 51.0 0.5152941176470588 -18309.0 43 NULL 24 32766 +false -51.0 KXvq4OfKW641X0d4WHM2md0 NULL NULL 51.0 0.5152941176470588 -18309.0 35 NULL 24 26670 +false -51.0 2eJegODpls2LBS2vAFl1OvQ NULL NULL 51.0 0.5152941176470588 -18309.0 -23 NULL 24 -17526 +false -51.0 Bu1QtYr5sfcMxyD2c650GW NULL NULL 51.0 0.5152941176470588 -18309.0 -26 NULL 24 -19812 +false -51.0 SCh73 NULL NULL 51.0 0.5152941176470588 -18309.0 9 NULL 24 6858 +false -51.0 AfW67EWaHMIQ7yvfqHRUwB NULL NULL 51.0 0.5152941176470588 -18309.0 -6 NULL 24 -4572 +false -51.0 D300Wwybt50R66GNV NULL NULL 51.0 0.5152941176470588 -18309.0 19 NULL 24 14478 +false -51.0 M5857hgh7234V88EX NULL NULL 51.0 0.5152941176470588 -18309.0 32 NULL 24 24384 +false -51.0 7M515cSr37Sj NULL NULL 51.0 0.5152941176470588 -18309.0 14 NULL 24 10668 +false -51.0 wVwuQ6dkmkcLxtfK8haA NULL NULL 51.0 0.5152941176470588 -18309.0 26 NULL 24 19812 +false -51.0 tKyw2O2N NULL NULL 51.0 0.5152941176470588 -18309.0 -45 NULL 24 -34290 +false -51.0 gcnk28ttRLv13O3ms6p10y NULL NULL 51.0 0.5152941176470588 -18309.0 31 NULL 24 23622 +false -51.0 BLyBF45iOWdg58oNy NULL NULL 51.0 0.5152941176470588 -18309.0 45 NULL 24 34290 +false -51.0 ffT4cTjYf2NJ NULL NULL 51.0 0.5152941176470588 -18309.0 -36 NULL 24 -27432 +false -51.0 oE25GuI6446Hq06G4f NULL NULL 51.0 0.5152941176470588 -18309.0 -26 NULL 24 -19812 +false -51.0 e8Yq6dHfa7d61IgPcKrO NULL NULL 51.0 0.5152941176470588 -18309.0 9 NULL 24 6858 +false -51.0 EXWsAOlGYtb053ExF6u5FLyb NULL NULL 51.0 0.5152941176470588 -18309.0 5 NULL 24 3810 +false -51.0 O3k76JCgFN83d58REWNvt243 NULL NULL 51.0 0.5152941176470588 -18309.0 0 NULL 24 0 +false -51.0 CoMlAAYdRSe NULL NULL 51.0 0.5152941176470588 -18309.0 -45 NULL 24 -34290 +true -51.0 gtulO7xHeSn NULL NULL 51.0 0.5152941176470588 -18309.0 42 NULL 24 32004 +false -51.0 l01UYMiq51W8G4LJtEp86mD7 NULL NULL 51.0 0.5152941176470588 -18309.0 16 NULL 24 12192 +false -51.0 K31Po8dhUXDBDt NULL NULL 51.0 0.5152941176470588 -18309.0 26 NULL 24 19812 +false -51.0 5d4rPb72As3cr1UU04go8 NULL NULL 51.0 0.5152941176470588 -18309.0 -24 NULL 24 -18288 +false -51.0 JVCOfSTVb NULL NULL 51.0 0.5152941176470588 
-18309.0 0 NULL 24 0 +false -51.0 b NULL NULL 51.0 0.5152941176470588 -18309.0 -2 NULL 24 -1524 +false -51.0 72PfIF567Op NULL NULL 51.0 0.5152941176470588 -18309.0 39 NULL 24 29718 +false -51.0 0CkUHn44bl6xbyYLk NULL NULL 51.0 0.5152941176470588 -18309.0 0 NULL 24 0 +false -51.0 r7JrMe NULL NULL 51.0 0.5152941176470588 -18309.0 26 NULL 24 19812 +false -51.0 73yDbT5WqsMNEB7FmJ3h NULL NULL 51.0 0.5152941176470588 -18309.0 -22 NULL 24 -16764 +false -51.0 U3pW0g NULL NULL 51.0 0.5152941176470588 -18309.0 -29 NULL 24 -22098 +false -51.0 00iT08 NULL NULL 51.0 0.5152941176470588 -18309.0 28 NULL 24 21336 +false -51.0 Sd20gdOoONPhK2OX4 NULL NULL 51.0 0.5152941176470588 -18309.0 -37 NULL 24 -28194 +false -51.0 Yts214m8mDhRw4F2d56 NULL NULL 51.0 0.5152941176470588 -18309.0 37 NULL 24 28194 +false -51.0 1324Nbqc0C7h6niurp77wT NULL NULL 51.0 0.5152941176470588 -18309.0 -35 NULL 24 -26670 +false -51.0 Bq245sjauEPf NULL NULL 51.0 0.5152941176470588 -18309.0 38 NULL 24 28956 +false -51.0 62JFFg7GbAn1 NULL NULL 51.0 0.5152941176470588 -18309.0 -30 NULL 24 -22860 +false -51.0 4W6pl6oLfgN0ax NULL NULL 51.0 0.5152941176470588 -18309.0 27 NULL 24 20574 +true -51.0 M76D058tDDD25v3g NULL NULL 51.0 0.5152941176470588 -18309.0 -45 NULL 24 -34290 +false -51.0 1JGq6EC86Lc67B NULL NULL 51.0 0.5152941176470588 -18309.0 -26 NULL 24 -19812 +false -51.0 D5SANA44B8Jm NULL NULL 51.0 0.5152941176470588 -18309.0 -10 NULL 24 -7620 +false -51.0 5K4lM3GNCDNNA4H5H NULL NULL 51.0 0.5152941176470588 -18309.0 -14 NULL 24 -10668 +false -51.0 nVp18XV4iVW217Vr4hb NULL NULL 51.0 0.5152941176470588 -18309.0 39 NULL 24 29718 +false -51.0 IFDa6Y1D4JuF50F2su708Wt NULL NULL 51.0 0.5152941176470588 -18309.0 47 NULL 24 35814 +false -51.0 4LQe2Pd4m640E58XFA NULL NULL 51.0 0.5152941176470588 -18309.0 -13 NULL 24 -9906 +false -51.0 Dtsb7s36eASJVh1Xi32K NULL NULL 51.0 0.5152941176470588 -18309.0 -9 NULL 24 -6858 +false -51.0 jXQPXUOT6OR75ChPwBr NULL NULL 51.0 0.5152941176470588 -18309.0 25 NULL 24 19050 +false -51.0 40PQ82QY6 NULL NULL 51.0 0.5152941176470588 -18309.0 -2 NULL 24 -1524 +false -51.0 33cr1j NULL NULL 51.0 0.5152941176470588 -18309.0 -16 NULL 24 -12192 +false -51.0 8PpV88OGb NULL NULL 51.0 0.5152941176470588 -18309.0 39 NULL 24 29718 +true -51.0 q0YasY0Y17250cD NULL NULL 51.0 0.5152941176470588 -18309.0 -1 NULL 24 -762 +false -51.0 p8CvcP7et NULL NULL 51.0 0.5152941176470588 -18309.0 4 NULL 24 3048 +false -51.0 060EnWLmWE4K8Pv NULL NULL 51.0 0.5152941176470588 -18309.0 -19 NULL 24 -14478 +false -51.0 LrOMx3GjUHE614W7s36tp NULL NULL 51.0 0.5152941176470588 -18309.0 -2 NULL 24 -1524 +false -51.0 12YH5vxufod8Wu1R NULL NULL 51.0 0.5152941176470588 -18309.0 -7 NULL 24 -5334 +false -51.0 2G6B67cu1BUqRd3I52Ug20 NULL NULL 51.0 0.5152941176470588 -18309.0 -7 NULL 24 -5334 +false -51.0 3SaS218squQ6hlv5H76M0C7p NULL NULL 51.0 0.5152941176470588 -18309.0 22 NULL 24 16764 +false -51.0 2E41VxRBT043Jn6Ggf4no0O NULL NULL 51.0 0.5152941176470588 -18309.0 18 NULL 24 13716 +false -51.0 nuIwy NULL NULL 51.0 0.5152941176470588 -18309.0 -16 NULL 24 -12192 +false -51.0 nvj0X NULL NULL 51.0 0.5152941176470588 -18309.0 42 NULL 24 32004 +false -51.0 KM06o1 NULL NULL 51.0 0.5152941176470588 -18309.0 -17 NULL 24 -12954 +false -51.0 HA1yh NULL NULL 51.0 0.5152941176470588 -18309.0 -17 NULL 24 -12954 +false -51.0 0S3XIH2NDeS0xS NULL NULL 51.0 0.5152941176470588 -18309.0 1 NULL 24 762 +false -51.0 72M1iL43IC7n NULL NULL 51.0 0.5152941176470588 -18309.0 44 NULL 24 33528 +false -51.0 d5gs2s6trx20upPuW3SAi4o NULL NULL 51.0 0.5152941176470588 -18309.0 -11 NULL 24 
-8382 +false -51.0 2iVjtVVhM8R57oy NULL NULL 51.0 0.5152941176470588 -18309.0 34 NULL 24 25908 +false -51.0 VLVJ2YFurner0i58drukgj NULL NULL 51.0 0.5152941176470588 -18309.0 -47 NULL 24 -35814 +false -51.0 a5MyXRAIwPX1CO3w53Rar8wf NULL NULL 51.0 0.5152941176470588 -18309.0 -12 NULL 24 -9144 +false -51.0 c7j0PI24L0M27GoF43v4Ucf NULL NULL 51.0 0.5152941176470588 -18309.0 21 NULL 24 16002 +false -51.0 dv4kivc NULL NULL 51.0 0.5152941176470588 -18309.0 -4 NULL 24 -3048 +false -51.0 CJIO2 NULL NULL 51.0 0.5152941176470588 -18309.0 -48 NULL 24 -36576 +false -51.0 s038hX0U8 NULL NULL 51.0 0.5152941176470588 -18309.0 16 NULL 24 12192 +true -51.0 VfD3Byd4aV358l12 NULL NULL 51.0 0.5152941176470588 -18309.0 -6 NULL 24 -4572 +false -51.0 hANtHaOf NULL NULL 51.0 0.5152941176470588 -18309.0 -31 NULL 24 -23622 +false -51.0 jXpBexSQ3hC342hdkv NULL NULL 51.0 0.5152941176470588 -18309.0 33 NULL 24 25146 +false -51.0 702XRI NULL NULL 51.0 0.5152941176470588 -18309.0 10 NULL 24 7620 +false -51.0 w7PV8VhGA NULL NULL 51.0 0.5152941176470588 -18309.0 41 NULL 24 31242 +false -51.0 hw7e2oF7 NULL NULL 51.0 0.5152941176470588 -18309.0 35 NULL 24 26670 +false -51.0 n2L2mKJgQ08uGWsrgC30T NULL NULL 51.0 0.5152941176470588 -18309.0 -11 NULL 24 -8382 +false -51.0 m7i5sn7r0 NULL NULL 51.0 0.5152941176470588 -18309.0 -49 NULL 24 -37338 +false -51.0 15w3qCVPlsGoqbi1 NULL NULL 51.0 0.5152941176470588 -18309.0 -48 NULL 24 -36576 +false -51.0 VF8w7AjS6 NULL NULL 51.0 0.5152941176470588 -18309.0 48 NULL 24 36576 +false -51.0 U68Np7DCKJO8 NULL NULL 51.0 0.5152941176470588 -18309.0 -33 NULL 24 -25146 +false -51.0 4l6OX60y NULL NULL 51.0 0.5152941176470588 -18309.0 13 NULL 24 9906 +false -51.0 IblvAnYcnAwTiEM NULL NULL 51.0 0.5152941176470588 -18309.0 -47 NULL 24 -35814 +false -51.0 23R287wx8g5N22kp034161 NULL NULL 51.0 0.5152941176470588 -18309.0 -31 NULL 24 -23622 +false -51.0 O1fW6627aJkal NULL NULL 51.0 0.5152941176470588 -18309.0 13 NULL 24 9906 +false -51.0 A71P2rA NULL NULL 51.0 0.5152941176470588 -18309.0 46 NULL 24 35052 +false -51.0 8d4D1 NULL NULL 51.0 0.5152941176470588 -18309.0 -46 NULL 24 -35052 +false -51.0 tC57X NULL NULL 51.0 0.5152941176470588 -18309.0 -44 NULL 24 -33528 +false -51.0 qNaAh8CdJxxTG8y0 NULL NULL 51.0 0.5152941176470588 -18309.0 -27 NULL 24 -20574 +false -51.0 32OjMMVB54jv35 NULL NULL 51.0 0.5152941176470588 -18309.0 4 NULL 24 3048 +false -51.0 ljrUp5jPP3u6Y5i NULL NULL 51.0 0.5152941176470588 -18309.0 36 NULL 24 27432 +false -51.0 FGx13w3IFFT718DDr5 NULL NULL 51.0 0.5152941176470588 -18309.0 -22 NULL 24 -16764 +false -51.0 651rcX4uUheL07lI5m7 NULL NULL 51.0 0.5152941176470588 -18309.0 -18 NULL 24 -13716 +false -51.0 IbgbUvP5 NULL NULL 51.0 0.5152941176470588 -18309.0 -5 NULL 24 -3810 +false -51.0 rphq0n30wctykU8E NULL NULL 51.0 0.5152941176470588 -18309.0 43 NULL 24 32766 +false -51.0 2NR62NFR5 NULL NULL 51.0 0.5152941176470588 -18309.0 49 NULL 24 37338 +false -51.0 xJTkdBR4QU NULL NULL 51.0 0.5152941176470588 -18309.0 42 NULL 24 32004 +true -51.0 PYSh3CD1vxxH3Aq2B NULL NULL 51.0 0.5152941176470588 -18309.0 11 NULL 24 8382 +false -51.0 1Lh6Uoq3WhNtOqQHu7WN7U NULL NULL 51.0 0.5152941176470588 -18309.0 -22 NULL 24 -16764 +false -51.0 0TN06s2WtHc NULL NULL 51.0 0.5152941176470588 -18309.0 -45 NULL 24 -34290 +false -51.0 Ad4KRAdOpE25j1BV NULL NULL 51.0 0.5152941176470588 -18309.0 -19 NULL 24 -14478 +false -51.0 sohL07P3D1W3aqMu2i NULL NULL 51.0 0.5152941176470588 -18309.0 10 NULL 24 7620 +false -51.0 04Yu8RntCU7amJtj NULL NULL 51.0 0.5152941176470588 -18309.0 -50 NULL 24 -38100 +false -51.0 
Yv7NbK3bBtLv2oCp7g622yO NULL NULL 51.0 0.5152941176470588 -18309.0 -45 NULL 24 -34290 +false -51.0 EPCRx8ObNv51rOF NULL NULL 51.0 0.5152941176470588 -18309.0 -9 NULL 24 -6858 +false -51.0 jin5N37sI8CpGW3x8X2v2 NULL NULL 51.0 0.5152941176470588 -18309.0 16 NULL 24 12192 +false -51.0 8eiti74gc5m01xyMKSjUIx NULL NULL 51.0 0.5152941176470588 -18309.0 -10 NULL 24 -7620 +false -51.0 xgPW6tMwuNv67I0q2227 NULL NULL 51.0 0.5152941176470588 -18309.0 16 NULL 24 12192 +false -51.0 T5eOivl6F4ew1 NULL NULL 51.0 0.5152941176470588 -18309.0 1 NULL 24 762 +false -51.0 QRq4fxOau2jef55O5X1 NULL NULL 51.0 0.5152941176470588 -18309.0 -27 NULL 24 -20574 +false -51.0 y3VheNURDylWr0mse3mv0 NULL NULL 51.0 0.5152941176470588 -18309.0 -2 NULL 24 -1524 +false -51.0 en63YvV2PB76duGPhyLQa NULL NULL 51.0 0.5152941176470588 -18309.0 25 NULL 24 19050 +false -51.0 Yas32KF NULL NULL 51.0 0.5152941176470588 -18309.0 30 NULL 24 22860 +false -51.0 t6Y38CKxB3keFFwxHN1eQh NULL NULL 51.0 0.5152941176470588 -18309.0 7 NULL 24 5334 +false -51.0 TlU343q2ha8vt NULL NULL 51.0 0.5152941176470588 -18309.0 -5 NULL 24 -3810 +false -51.0 1o5T8oXJi5CAYe8540C NULL NULL 51.0 0.5152941176470588 -18309.0 -20 NULL 24 -15240 +false -51.0 F10SR3l5836pq7TCfYeGrEl1 NULL NULL 51.0 0.5152941176470588 -18309.0 -45 NULL 24 -34290 +false -51.0 lcL6t NULL NULL 51.0 0.5152941176470588 -18309.0 -47 NULL 24 -35814 +false -51.0 3FXmaPtM8 NULL NULL 51.0 0.5152941176470588 -18309.0 41 NULL 24 31242 +false -51.0 1cVy44 NULL NULL 51.0 0.5152941176470588 -18309.0 30 NULL 24 22860 +false -51.0 H8LCu4M2u4f1S NULL NULL 51.0 0.5152941176470588 -18309.0 -28 NULL 24 -21336 +false -51.0 84O1C65C5k88bI7i4 NULL NULL 51.0 0.5152941176470588 -18309.0 NULL NULL 24 NULL +false -51.0 3h8mD2F76eq4mS NULL NULL 51.0 0.5152941176470588 -18309.0 -14 NULL 24 -10668 +false -51.0 8G82H54442m0AjgH3a4h NULL NULL 51.0 0.5152941176470588 -18309.0 42 NULL 24 32004 +false -51.0 2YOJT4Sveu NULL NULL 51.0 0.5152941176470588 -18309.0 -10 NULL 24 -7620 +false -51.0 Das7E73 NULL NULL 51.0 0.5152941176470588 -18309.0 -20 NULL 24 -15240 +false -51.0 0863bBy3dkL74WtiERo3L NULL NULL 51.0 0.5152941176470588 -18309.0 -41 NULL 24 -31242 +false -51.0 d3yQbTLvpGyi0 NULL NULL 51.0 0.5152941176470588 -18309.0 16 NULL 24 12192 +false -51.0 216N1n3bRv NULL NULL 51.0 0.5152941176470588 -18309.0 -10 NULL 24 -7620 +false -51.0 3hF4a683G4Vc2N1 NULL NULL 51.0 0.5152941176470588 -18309.0 -38 NULL 24 -28956 +false -51.0 IwT2y4ak76hu1BgGDSKuI NULL NULL 51.0 0.5152941176470588 -18309.0 43 NULL 24 32766 +false -51.0 kih3Q NULL NULL 51.0 0.5152941176470588 -18309.0 -25 NULL 24 -19050 +false -51.0 CbULhCEo3m8Q357 NULL NULL 51.0 0.5152941176470588 -18309.0 48 NULL 24 36576 +false -51.0 oAYFcgT5 NULL NULL 51.0 0.5152941176470588 -18309.0 -11 NULL 24 -8382 +false -51.0 FBWY8rR466Y NULL NULL 51.0 0.5152941176470588 -18309.0 -11 NULL 24 -8382 +false -51.0 rreK1Bk70JwRIV3sQJEg NULL NULL 51.0 0.5152941176470588 -18309.0 40 NULL 24 30480 +false -51.0 EqUT4hfjoX45 NULL NULL 51.0 0.5152941176470588 -18309.0 16 NULL 24 12192 +false -51.0 s1K04o1 NULL NULL 51.0 0.5152941176470588 -18309.0 -5 NULL 24 -3810 +false -51.0 5a7WjXX5w1bkc8hv8Xx5LM NULL NULL 51.0 0.5152941176470588 -18309.0 -16 NULL 24 -12192 +false -51.0 K2mrUY NULL NULL 51.0 0.5152941176470588 -18309.0 -43 NULL 24 -32766 +false -51.0 5f0u27Q1PvB1gCMn NULL NULL 51.0 0.5152941176470588 -18309.0 27 NULL 24 20574 +false -51.0 vvT8tpW518 NULL NULL 51.0 0.5152941176470588 -18309.0 -43 NULL 24 -32766 +false -51.0 M462UC NULL NULL 51.0 0.5152941176470588 -18309.0 -21 NULL 24 
-16002 +false -51.0 2p0iX031016VDNb6KWJ NULL NULL 51.0 0.5152941176470588 -18309.0 -25 NULL 24 -19050 +false -51.0 KB3sgv2UcA152 NULL NULL 51.0 0.5152941176470588 -18309.0 -39 NULL 24 -29718 +false -51.0 M5TxI32kgu NULL NULL 51.0 0.5152941176470588 -18309.0 -39 NULL 24 -29718 +false -51.0 nhv8Bo2VCHouwa01x1 NULL NULL 51.0 0.5152941176470588 -18309.0 -43 NULL 24 -32766 +false -51.0 47xesJJ32Ia NULL NULL 51.0 0.5152941176470588 -18309.0 22 NULL 24 16764 +false -51.0 UFwddOjC38Fj NULL NULL 51.0 0.5152941176470588 -18309.0 -25 NULL 24 -19050 +false -51.0 EY2fCS NULL NULL 51.0 0.5152941176470588 -18309.0 19 NULL 24 14478 +false -51.0 ss NULL NULL 51.0 0.5152941176470588 -18309.0 -4 NULL 24 -3048 +false -51.0 TD01cg4gOr1msv1b NULL NULL 51.0 0.5152941176470588 -18309.0 22 NULL 24 16764 +false -51.0 80K4C NULL NULL 51.0 0.5152941176470588 -18309.0 3 NULL 24 2286 +false -51.0 4v3613837dytHDDLO NULL NULL 51.0 0.5152941176470588 -18309.0 42 NULL 24 32004 +false -51.0 aD88uS2N8DmqPlvjOa7F46i7 NULL NULL 51.0 0.5152941176470588 -18309.0 -38 NULL 24 -28956 +false -51.0 DS4iDURlsq418pFh8 NULL NULL 51.0 0.5152941176470588 -18309.0 -14 NULL 24 -10668 +false -51.0 plmMo28a0B5CtT63uC NULL NULL 51.0 0.5152941176470588 -18309.0 -44 NULL 24 -33528 +false -51.0 OBbyvnMMUh1iJ80EKnx178 NULL NULL 51.0 0.5152941176470588 -18309.0 15 NULL 24 11430 +false -51.0 8VOMo4k2fVr88MuEw72V6N NULL NULL 51.0 0.5152941176470588 -18309.0 22 NULL 24 16764 +false -51.0 Xc3mi NULL NULL 51.0 0.5152941176470588 -18309.0 -2 NULL 24 -1524 +false -51.0 tyt5Bwxxe NULL NULL 51.0 0.5152941176470588 -18309.0 47 NULL 24 35814 +false -51.0 2H45o NULL NULL 51.0 0.5152941176470588 -18309.0 -48 NULL 24 -36576 +false -51.0 M4O8OkhX3T1D2MMuf2Pm NULL NULL 51.0 0.5152941176470588 -18309.0 23 NULL 24 17526 +false -51.0 0f4422CBSl NULL NULL 51.0 0.5152941176470588 -18309.0 -40 NULL 24 -30480 +false -51.0 00MmJs1fiJp37y60mj4Ej8 NULL NULL 51.0 0.5152941176470588 -18309.0 -43 NULL 24 -32766 +false -51.0 hyi44EO7Eqi4QI1qQ7h NULL NULL 51.0 0.5152941176470588 -18309.0 18 NULL 24 13716 +false -51.0 M0J1l7pujAvtkGH NULL NULL 51.0 0.5152941176470588 -18309.0 32 NULL 24 24384 +false -51.0 4Y6F2QEy0v68 NULL NULL 51.0 0.5152941176470588 -18309.0 -43 NULL 24 -32766 +false -51.0 8nrs8SX553uTd63hTJ NULL NULL 51.0 0.5152941176470588 -18309.0 26 NULL 24 19812 +false -51.0 vgd8P8Ff1n NULL NULL 51.0 0.5152941176470588 -18309.0 -31 NULL 24 -23622 +false -51.0 Hf8123hK0 NULL NULL 51.0 0.5152941176470588 -18309.0 -24 NULL 24 -18288 +false -51.0 K4Npj34S8iAOa6qRd7y88Sb NULL NULL 51.0 0.5152941176470588 -18309.0 -12 NULL 24 -9144 +false -51.0 361M8OmUcKBPrFTcY5 NULL NULL 51.0 0.5152941176470588 -18309.0 -45 NULL 24 -34290 +false -51.0 F13clAHtHaUN2t6wLxE7S3T NULL NULL 51.0 0.5152941176470588 -18309.0 -47 NULL 24 -35814 +false -51.0 VDTWq NULL NULL 51.0 0.5152941176470588 -18309.0 -48 NULL 24 -36576 +false -51.0 sodtQ7I41ON4 NULL NULL 51.0 0.5152941176470588 -18309.0 -16 NULL 24 -12192 +false -51.0 DM3fMIDl770Nt083jjTQ2Uh NULL NULL 51.0 0.5152941176470588 -18309.0 0 NULL 24 0 +false -51.0 0EU2GSKN4svnsv NULL NULL 51.0 0.5152941176470588 -18309.0 -33 NULL 24 -25146 +false -51.0 IWNnWp4jmtO78 NULL NULL 51.0 0.5152941176470588 -18309.0 29 NULL 24 22098 +false -51.0 U7JukXmI NULL NULL 51.0 0.5152941176470588 -18309.0 46 NULL 24 35052 +false -51.0 WA6Cb1YeX7TOI7j3jnrh7W NULL NULL 51.0 0.5152941176470588 -18309.0 -46 NULL 24 -35052 +false -51.0 C6hoSE4L6NCrA NULL NULL 51.0 0.5152941176470588 -18309.0 -30 NULL 24 -22860 +false -51.0 62Q7DRed301Gx NULL NULL 51.0 0.5152941176470588 
-18309.0 10 NULL 24 7620 +false -51.0 LgMBG6G3Oc5baLkjeP50i8 NULL NULL 51.0 0.5152941176470588 -18309.0 -47 NULL 24 -35814 +false -51.0 56EtJ6FmSp47bf0Jj NULL NULL 51.0 0.5152941176470588 -18309.0 -11 NULL 24 -8382 +false -51.0 6502UQ2Jb18nD7kNw NULL NULL 51.0 0.5152941176470588 -18309.0 -18 NULL 24 -13716 +false -51.0 6GpbwQ3mT NULL NULL 51.0 0.5152941176470588 -18309.0 -7 NULL 24 -5334 +true -51.0 S5RB5whaBLeLnMBAUm4oXX NULL NULL 51.0 0.5152941176470588 -18309.0 49 NULL 24 37338 +false -51.0 Ako362FErCK8F2v31h3Ns260 NULL NULL 51.0 0.5152941176470588 -18309.0 29 NULL 24 22098 +false -51.0 H8dq1J4bt18aF4W48 NULL NULL 51.0 0.5152941176470588 -18309.0 -38 NULL 24 -28956 +false -51.0 QgA6r86x0JrfdHuM NULL NULL 51.0 0.5152941176470588 -18309.0 21 NULL 24 16002 +false -51.0 d1N0u454kG87DN3o NULL NULL 51.0 0.5152941176470588 -18309.0 -27 NULL 24 -20574 +false -51.0 05oYA4ya5 NULL NULL 51.0 0.5152941176470588 -18309.0 -31 NULL 24 -23622 +false -51.0 wvd3uAAa01J6a6L NULL NULL 51.0 0.5152941176470588 -18309.0 -49 NULL 24 -37338 +false -51.0 J0VTT0R8t1JcxdoOO NULL NULL 51.0 0.5152941176470588 -18309.0 49 NULL 24 37338 +NULL -50.0 NULL -1752227496 -15601 50.0 0.5256000000000001 -17950.0 NULL -15601.0 25 NULL +NULL 35.0 NULL -1752227496 -15601 -35.0 -0.7508571428571429 12565.0 NULL -15601.0 110 NULL +NULL -12.0 NULL -1752227496 -15601 12.0 2.19 -4308.0 NULL -15601.0 63 NULL +NULL -23.0 NULL -1752227496 -15601 23.0 1.142608695652174 -8257.0 NULL -15601.0 52 NULL +NULL -22.0 NULL -1752227496 -15601 22.0 1.1945454545454546 -7898.0 NULL -15601.0 53 NULL +NULL 21.0 NULL -1752227496 -15601 -21.0 -1.2514285714285716 7539.0 NULL -15601.0 96 NULL +NULL -49.0 NULL -1752227496 -15601 49.0 0.5363265306122449 -17591.0 NULL -15601.0 26 NULL +NULL 30.0 NULL -1752227496 -15601 -30.0 -0.876 10770.0 NULL -15601.0 105 NULL +NULL -50.0 NULL -1752227496 -15601 50.0 0.5256000000000001 -17950.0 NULL -15601.0 25 NULL +NULL 55.0 NULL -1752227496 -15601 -55.0 -0.47781818181818186 19745.0 NULL -15601.0 130 NULL +NULL 29.0 NULL -1752227496 -15601 -29.0 -0.9062068965517242 10411.0 NULL -15601.0 104 NULL +NULL -42.0 NULL -1752227496 -15601 42.0 0.6257142857142858 -15078.0 NULL -15601.0 33 NULL +NULL -20.0 NULL -1752227496 -15601 20.0 1.314 -7180.0 NULL -15601.0 55 NULL +false 8.0 pCt10IJTv8 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 B7aMvVm446mg46CL NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 fIjNh3dt21cMWe8 NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 11Cjb3gHPUSjs1Dg3Co443SD NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 W4TEt52sKL0ndx4jeCahICDW NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 B26L6Qp134xe0wy0Si NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 OQQgFcOqtpjdsCCejbvAAi NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 Gb5w0aja8H NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 eDfHPeW364TY4A2Jhm NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 4jGPKNFY4TP2K8Gw NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 4Ma84C526OTHw0tbwxaQ NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 4hVoMF62WFn82 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 GY0R5v7a8x43DO5 NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 v74G5Gs3 NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 y0Mqh552G2 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 h3qJh214D NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 8Ie6o54y NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 65NJ5u6TD716OP4hB NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 
+false 8.0 Df7N7eedkot NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 QCqa3FP8v3D NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 dJ6UMgP76K8hC6dVfqFW NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 41OuKHD4wRu238388Cq NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 uj2wiF041GHx NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 6gG4WwoSJ887F15fK824g3e NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 674ILv3V2TxFqXP6wSbL NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 L15l8i5k558tBcDV20 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 0mrwaF7Lj8 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 LOP6Akks01gG1 NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 GV0Wt1N7Q NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 s3Vu3wtVYOJbHGMLQW1 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 D51v22DPjSeSplVUk NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 CjhiR NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 827237W7G6hlU0Y60L6Sm8 NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 40vWkNP0f6DJQu NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 8We4u3732apuHDPV NULL NULL -8.0 -3.285 2872.0 NULL NULL 83 NULL +false 8.0 1w6mvRv543W805LP NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 514eg00Ro1RtB8GGeUCHYAqS NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 I2p1w NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 vxwTTLWW2SR5u NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 Q6LDBb NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 0m8aHX5yF5muTQW NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 pHr8j7sK3hQqSGPT1L320R NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 B50OoxbIK NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 8JNVrH3Lasa826 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 10c4qt584m5y6uWT NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 UR4W5ynqpg NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 x5x535DWvIpVDYn NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 6xn1INe8xSG0487IUAaMYRH1 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 tm85HNL7au4na NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 F3u1yJaQywofxCCM4v4jScY NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 kM4k0y1fqwton NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 YE7I5JK87tW5 NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 2QJ1CmlPPD4fLq7 NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 a88x2Cl NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 3VK3CE7sganaEC NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 Kj0Rtt5r6bFQ2NGQ NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 w5bn2LhMiFin26r3 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 4p32f3dqm6X0Vyd NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 ifm05ON NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 0D6533 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 603r01G4J NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 5iRDem4pt4 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 1lxocR56Tc6bWcLf1GHE7 NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 Nf1SX4jg2f7nyT NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 s2y7T NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 Frlb0SoQ8 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 f62KPh6SmIy NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 v2xYG8X7P8HjL3n83 NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 7OnIvTMO27Hksu6 NULL 
NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 g0AoxG8FyF NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 6sB2kOb37 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 w001v23l5b6tau7H NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 eJROSNhugc3kQR7Pb NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 1N6BDpg65g6 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 miQXFj3fd8Uk388 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 0UR5vFxRwBc8qtO NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 0rNlSy15Xy1Sx NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 lsridF1nnI NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 sS4e8jrP NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 2x480cpEl NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 NaDO45Xxri3X NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 r1L2WTM NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 U3MM60y4t4Ykm NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 MRoENDT50CoGq45C NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 gY5CjIAG71Fh NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 M4HtnssfQiEAD0jYL6 NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 E1K2fsDf8P NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 iStQPx6j8SvMc NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 8vKN51JNM7 NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +true 8.0 3yeQxU NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 fn7k8uv2T7Ifrg NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 mLcj2Cd6L317mcE8Wyv5 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 E50C7d53L56 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 5mPiHh NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +true 8.0 t8Lh68DM18aEr4G7J7dX2Ee3 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 YNsNwqw8y7D65 NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 ytpx1RL8F2I NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 e13dNAo71UXm4Yt1u NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 n2W51l NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 82V4K75apw NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 jqhcD NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 x28I3iV5XV870TUy3Fww NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 kNAHl NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 LAi381BGdEy78j4ke NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 NSLFx NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 s3WL6smnb7 NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 NEGa0N8MJ2dnn3MKAfl6u NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 Q3F7MokUsoVf1xHYCorS NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 JXySu NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 KxewntCJ0mlktP NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 7o0LS1 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 oICOhMTtl6X2 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 04w7DF25lHW4 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 2p7ND20blG8t2cy1VRh16 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 Y3oJ30U4LUuen7U6JjfaexL6 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 E0E7P7p84ltGE4 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 Q31pMN30tPv010W0U2h1s124 NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 RJsFsi3a85svGBfT8 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 h85CHOY0SM0YA NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 05RA7lJ5odEHh13Uj8JkO15D NULL NULL 
-8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 e005B5q NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 gew1eby3AlYSvPICC3 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 6FY0I4YdYA NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 oNWnPJA7QT NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 xO4e02k1jpEEwO80AwCHb4 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 iEb04t2x333EF5wHoKRs6oKB NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 SrPY18L7FKBp8WO NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 64IHiaxNk4lo NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 3EYb6FUI5ckmAd24bR7Juc0 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 BwXBC7rU57 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 556IHnw5U5QfD4 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 kKL0p8pvX01sGT0I5203v NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 uXFnovL64803 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 Xxk00X NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 C470S3c NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 qC2BA3oYp NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 M3jjDj4cJP3yk67GlPULUx NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 8o0l440qDP1 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 V5oM8YBx2Kq63oy0um7 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 pxUt0f57qNtt3 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 rye3kBRGod1su NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 X1haQ NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 W3h83yyQNOicy1k7lw0Rb6 NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 27pysB0Qg6oA8Cf4cjWChH7J NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 efnt3 NULL NULL -8.0 -3.285 2872.0 NULL NULL 83 NULL +false 8.0 mw3S8 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 vfY7008pQEkX2F315E NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 cU6HuP4A323 NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 Yj656R8h5j NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 37nx5s6QE3F NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 Kk7EsvD4vMj2ijUnhyW48 NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 J6S681J6JPB2SD6Uc08U1 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 4YW4ASjU70MkyO2biMUV6 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 31rhe NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 lc8t8231OXG6C7DMG7Lqh NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 2RbYGSs0tvc6C574BcmprP NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 j2UTaANoWtpw2co6Nj3bR2UG NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 67CifPaaWjudYUDTB0IU NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 5u0iXh2Y84QgUXkfi726oF0E NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 Jm1d3h3OxQE NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 83tP8 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 8xLnT NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 sE158DS55 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 O65HL NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 W8IM4inL46o67VXd NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 mli7064t5U NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 Tt1BcY8q3welBr7o22KI3jF NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 3LWXOlGelGXQu64Lxws NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 252YCGI2DXxpdm7 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 tKRUQ0e NULL 
NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 Wu3285CX753 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 j2dqLVpEPr87jVGVotModCHd NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 1RH526 NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 418K4e01f6b NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 RsYTaV3rFO0kS2R4 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 ctL23E5x1d1 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 bI55nJLOusG5i NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 ImYiNP1Y0JoBfQLbd NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 jKOcSGq5CIGQK8wPD13l7 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 W114Au1ELrT7tRYnqE3MxCv NULL NULL -8.0 -3.285 2872.0 NULL NULL 83 NULL +false 8.0 h5M1D3a1q528tDjybg8 NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 I6Yl6OVpH65i NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 THh5lsUQ8a23g62 NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 8evw1sI852U4bid NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 6bnEapMI6L NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 vcw13dF2uJ6S5GEq3P1QV NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 1EQPbIb2Wc0v60b NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 22w42i7d7D2lhn6jfnlSN NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 AIqMWf4G31cTSrfl1M6VKm NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 3p52k8g15nQB2biT1bn7 NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 GX1nfv0HF8O3 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 Cd6HS76Hi77r7YGGH1 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 4UtjbA8bV4lkm NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 xqiJqgi4N1AR18yC464f1FC NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 x15jGM0RqU NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 1pxO53oqqBm2 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 Gn2Q3q7bvg6J56K NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 1nnwS4QL88H4N4NItBY7Nje NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 75RG2c8 NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 Xi7kOTT NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 nPy0TgiIloESA8nQ4Kkt2 NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 4W3748j3JCC NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 g0Kgv01XSAbU8u NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 J1kjNdL12V8 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 EqAU5Jit8kJfgutgf0U7Ren5 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 4E4kmNOo5dbi25IJPfr05To NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 3r3sDvfUkG0yTP3LnX5mNQRr NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 QJocgOK5m46i2F1rfSCy NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 GciA5Y0kP NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 HnxkMvjEL0rF NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 WhTuEkrt5Qrp5kj4xtFl8uW0 NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 Qk8f11O7Q NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 LHow6beTFmm4fPjj43Qy NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 1NydRD5y5o3 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 b0G65a66732y6yE65hQ0 NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 8TY873CPrH82JPwf NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 DuLQkL6 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 c61SOJvyi4PAdi0o NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 hM04012HKnNf8M7KhUi1x NULL NULL -8.0 
-3.285 2872.0 0 NULL 83 0 +false 8.0 R8B6PMUCp8Fuw NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 HcbsR51rXDw7016fVOt83YaX NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 5882EoppT NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 wO3YtYQ6XLp7w NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 RhOnR NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +true 8.0 H37833CDTytf1mp4 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 vkbGEG4q11J550U7u5EnSs NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 ryp70i8Er3IclwRg11 NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 Or43Y6lI NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 TBI20Ba2YuO44754E2BM NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 mpos7eNU1b3mj5 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 rWDAhu0jHF0kmKoFd4kr03 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 225vmIW8L75bEWVwFc NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 FdxyM7c NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 N7jXiULOjt7xH2SgHwC NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 jVV883J5rXAE5pI6qK NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 js4yrqYjb5asC5O48RlOoS NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 5Q5UxO88 NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 f60N6lQ1JF8TPt NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 3CrD10MgcCY1d5E21 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 M3Vcm3o NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 8jQqh182kkY6 NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 kN1P50L5yeSw NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 q8lY7m8OpG76x774s NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 q5k5l8H NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 pHBBhXH NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 M32Kp NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 eIyS41R32 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 djLQ52K3s5ReY3TQyWRl6 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 16T0Q0hg2 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 Qq3MD84DHC14CDiEGB7p04DO NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 LQd03j0RQEIsglKmjFPuYXJ2 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 EjY6DSn57x1v5h NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +true 8.0 6Ob80MBP350rI275 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 v0uSTRyX5A4W NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 0siU5JLRoUBPi88Kenqg4 NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 jxNdt4 NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 l35W8012cM77E227Ts NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 f3ylU62g8n4VsaJawXV88 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 5wpDt358nV NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 A3lqQ7ei3m008SlRm NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 s4LPR6Bg0j25SWD8 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 6kTCAoN08A NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 87Gan1I33d5v1 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 8lALowC26N0kJ371 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 yB5C57E21h4e5E NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 bP3R4cDVvx6t NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 p6umK8ea57Xg NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 5ealv0e6tmDnoS0bOmX NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 L8Xlx3485W3NxHr0q NULL NULL -8.0 -3.285 2872.0 6 
NULL 83 4572 +false 8.0 d52Q4 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 8reJCOg48gHGHDs NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 G0PNHsT6RM4 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 XBfrKWaX68o7HCfKf NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 4dYt6bF5xfHG2v4Fd56P NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 2wgUNj08KLsG4wks06 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 3y1D3A7yxnQenJs NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 fFKkdcf NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 4eWh0BTSBEu2 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 h218Rb5gYs NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 yX1Yqh86o275cYKdoU38 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 V3xf5QPg7EABK NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 jdgDsOTsyP7Eev2471637 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 bO45EOf7qg NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 nk8ff5B5H5R7Si NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 AmPHc4NUg3HwJ NULL NULL -8.0 -3.285 2872.0 NULL NULL 83 NULL +false 8.0 7c4q8O8ft1FuY1Mbsme NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 N304RM2d NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 1d8jOa45wiiv NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 RG57safmo8UjXo4c1230u NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 RtaC46i4DIukN7svr21U46G0 NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 eAGNl00o8pA000I48 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 vuNP0Q21M NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 0KG4XT6262r NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 0A2k346GBQ NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 pguqNU5184b47aYi8g NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 4ieWq56f7mIjQNs783D NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 I0ac41cnFsVAkHmhupt NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 6shc3Y NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 Sw74GCctTG3OmA1S330EC NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 K630vaVf NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 7660JjSpC0gG NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 34oSgU32X NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 hO87j00S6nkbuEFh1rL5ie NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 A74P2VrP7Ao34C87cV8634 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 By4JbbLm4g1Kyq67Er NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 t78BN1 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 qI8k4Mf NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 Fj7LiN85m NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 36fFwTWHYaD563T4Yjx1 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 41xyA NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 xOSHRK0e6243CG0Q NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 uGVS4blOlUNnx176 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 OTjMvEr0QiygFX856t7FPPlu NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 3ConB NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 qreC048mFnygscYQ6DuPrw NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 nJl6242B6arixd4RTTp6wG3 NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 8X155 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 iB4VI NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 Lcat8FGEhBw NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 
+false 8.0 JxddK7Pl4VF48 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 7SNpQFhk20XW6LON1g NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 HJPWlb23N NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 s2N0j0FMB2k5hnMb NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 DJxhgDD0mIQeDgs8 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 vJ153TP7CVIC NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 3Ke6A1U847tV73 NULL NULL -8.0 -3.285 2872.0 NULL NULL 83 NULL +false 8.0 1u4j8lva4XKq NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 53db1o6XRU2CbwxytJFIg NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 G6M7256nG NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 16qqkM5M66EMI3uWjWy NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 410uuUJB7nKBg NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 41GNy4 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 jH7VH38C77M08h5GNPp8M NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 y500EnnROOM NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 wPdH65hLhV83741j NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 5k53084hr NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 00k3yt70n476d6UQA NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 818vxXu11 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 10V3pN5r5lI2qWl2lG103 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 GdT0mf0U4Q0Mc8AFsCJ6a61 NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 07x1c NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 OE4GQ84apBXD6 NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 3weWVXQv3HgolM52OI2J8NAn NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 0T08CcDm0fDWR25u NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 dPbX4jd1v47r1bB6506si NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 Q72e8c NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 8JNt8dc84gCJC0tN NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 1AQR8H78mO7jyb2PBF NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 N7ED661T508c1vmM NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 HnA5J NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 6qdYTwkc3L5LGy NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 jpl2ap113Lt8 NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 p575lXH8K2IMIQ4qjma87 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 VU42OCI8nDXA0M NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 2a388Phe6 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 NlXgOC4tik26lq0 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 5EkunkVdHYCBxI30D36L6oM NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 mUY26uA6E NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 qUY8Rl34NWRg NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 fBTrfOGxGui72 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 QSdVNqav1efvKUht5o3N6 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 7Sb0367 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 6Ferlt3M8 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 fVgv88OvQR1BB7toX NULL NULL -8.0 -3.285 2872.0 NULL NULL 83 NULL +false 8.0 mbc5yM1H41i NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 28DIm820euPTCMJxiNBtVF NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 aiWFqnj NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 G4XIV50v8Ncd3 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 Le1vfH NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 7WLVW6F4h71Dgk7 
NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 0un2h56KS7gYB37L NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 4YN58DH0Hhxv5Oc4 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 0uA7It5CJu16eJ4JS1uuxNJ NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 D47x12qBG7n82y NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 euqLv NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 6o50QhXglfo0TlCF NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 s8C16hIJCvCdrOg3q8a1Go NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 r72O13XI NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 sFRsqLf NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 MJ7Ej4tBYS8l2mK NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 JrReU7qfE NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 x4dhr4EV4J NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 AMW7A NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 mvl88OrMd5O2WYb NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 w6gGSU471 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 eKu2BS26qOY0 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 48fOGR7H6oNnh7m3Y NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 1P2TFQRLS8P NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 Md0yyD6nXB1OBFdM2Gc NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 33woPLwH3MFmK NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 LdiBaUk NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 M8YT251 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 AuQ7FrUgXua NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 5SJ2q18tk53g4SdDvlH3 NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 TrVt3076w4QSXF83Io NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 I1be6JuP8HeaA8UI8c NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 b NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 2251WSv5eA2l6WqesdKPM2 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 ijmD5iqIymg NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 b5GwV NULL NULL -8.0 -3.285 2872.0 NULL NULL 83 NULL +false 8.0 GS7Sinl7k2srPHIdC7xsu NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 Xtw4eM002sS1101p NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 W4G22U32r8Ck NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 CUa3sAF216u7IeQ NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 8rac067JIBxRah56sw NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 2tV7k NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 6n3S324AM NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 rG7eG0M6IOEb007BB4Ynts NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 d05ua0EQjlFMb NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 t6WHE0 NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 doI56Fdj4YgK3Q335155DC6 NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 5Uh3u36dO NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 Omn3514WtBGS26q10wG NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 wyxWr1DYsR15OYJWE6F NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 5b7222ls0wgFVAff7D NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 3D8duxU6ikxujMiA3a1s3C1 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 x8RcAb7i5eeGulx4U200AN8F NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 3kt58sfq NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 0y7AJ4Mgm5KvSXXPh2802 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 2wv4mHH5001Rlwe5vG NULL NULL -8.0 -3.285 2872.0 0 NULL 
83 0 +false 8.0 hRUvK70d5B4F NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 Y4040E2ykhl2ih58m55Pfyaq NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 n3ASjX44hdNqD7smp NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 iaD4Rnj1 NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 H4g4563WvqWkArS NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 uHkBp64 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 2H2X40NiXBIW2f NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 76Gi03D76LwH75q5Qm8641aE NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 Dxc5s8wD6v47 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 h15Uw8Uidj2K5OYWOqQ5 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 02v8WnLuYDos3Cq NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 gfkqq1a3n56XaYAB NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 02VRbSC5I NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 MqcMK622OR2 NULL NULL -8.0 -3.285 2872.0 NULL NULL 83 NULL +false 8.0 3ocGWW4eY55A NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 Ju5Gq3IN77dD3541425UN NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 Pjmv0I66 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 0Apbh7X08i2JyMK NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 C32YIF3mQaXSTkCV8D2u7L7 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 6bf1hDU2gvI NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 7vH6I81S0 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 2kQ5t0876n4JffOpftYceg5 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 K1gQm1u7ExEr NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 AASM5H55Q142monqAx3u NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 cd5iw78V2n8N0x NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 LfUyaaMR2 NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 5nDHTQtR7 NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 G82p1 NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 rdcFjbu0F7yQ3C NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 aEgURECDWj44 NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 uXAG5QG6m60Y NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 T0rmM12M1kobD2yqIsO NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 bWhq42DR5G1Ypd NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 pIO3OuP40U8U1i112A NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 d5I5x4dq6tFbftHT NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 3n72v2K42wYgtoeJrjhHnDm NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 hA4vIK10755e76nB NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 fCf8y2hv5UrvJR2i1mD0yuc NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 UQv8T28745qO62T NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 36E3s7M68N2 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 3Qn72niu1tSo14 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 865ub2nreG8h0r7 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 8k2NIi3tY7t68 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 5nV8bh0O NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 g28jQ233uRHM7JG5E4 NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 8GloEukQ0c68JDmnYL53 NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 M6567 NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 Iny0u NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 mv2XSjHre54gnF3hbv NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 q3XGm NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 
74bXXWTpyU68 NULL NULL -8.0 -3.285 2872.0 NULL NULL 83 NULL +false 8.0 6V8Ok8kTDSE86D8h0q06qi NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 G0QdT8I4 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 CUaLDB NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 Uwyw8I50 NULL NULL -8.0 -3.285 2872.0 -5 NULL 83 -3810 +false 8.0 c2xCAAm6W24ho1Ett NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 f5elgJP3k07 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 kA0XH5C5 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 jqTYMlhRr2crw1Oo NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 GxsOc NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 vUum3jv NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 61b7h3g8gQVJjx NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 4c2KT50dog5 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 46aF585n7xBB NULL NULL -8.0 -3.285 2872.0 5 NULL 83 3810 +false 8.0 6olFV6c18IdYv6pBJG1 NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 nx6ptem0PKtsk07AIkoG5 NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 Y00YWUI2gXA NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 0sB8K NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 21177SI08X0RDP7y70pe157O NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 V746122yhMM3iEs NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 R6q656btrqQM6a5nQ4GcVg NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 N62KU05S73f5I0F77DK NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 2Amg22mSeD4C6OL64 NULL NULL -8.0 -3.285 2872.0 -4 NULL 83 -3048 +false 8.0 qFP23 NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 cb5LPuiF NULL NULL -8.0 -3.285 2872.0 -6 NULL 83 -4572 +false 8.0 RVa8teOcCN NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 Fh0xg4mjc7N4jCrkL NULL NULL -8.0 -3.285 2872.0 0 NULL 83 0 +false 8.0 W8A4i055 NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 G7IJs50P82Y5G4s1nH52Y2j NULL NULL -8.0 -3.285 2872.0 -2 NULL 83 -1524 +false 8.0 casvJ6NR NULL NULL -8.0 -3.285 2872.0 7 NULL 83 5334 +false 8.0 qJTKE1 NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 8eSO14 NULL NULL -8.0 -3.285 2872.0 -1 NULL 83 -762 +false 8.0 L47nqo NULL NULL -8.0 -3.285 2872.0 -7 NULL 83 -5334 +false 8.0 vHIBETRJieO3a6px NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +false 8.0 lqdd2uvmkyl4U1TYY3 NULL NULL -8.0 -3.285 2872.0 6 NULL 83 4572 +false 8.0 6EkcHQJ8dg NULL NULL -8.0 -3.285 2872.0 -3 NULL 83 -2286 +false 8.0 DGu7ynB5SM3A864nRD NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 164334b43QNUJ NULL NULL -8.0 -3.285 2872.0 3 NULL 83 2286 +false 8.0 drU0J0cDrY6S083r7T5Nd NULL NULL -8.0 -3.285 2872.0 4 NULL 83 3048 +false 8.0 a1sV4Se71EjpRn NULL NULL -8.0 -3.285 2872.0 2 NULL 83 1524 +false 8.0 iQq6r8j4suqBapdr7m35j NULL NULL -8.0 -3.285 2872.0 1 NULL 83 762 +NULL 57.0 NULL 1473896544 7196 -57.0 -0.4610526315789474 20463.0 NULL 7196.0 132 NULL +NULL -51.0 NULL 1473896544 7196 51.0 0.5152941176470588 -18309.0 NULL 7196.0 24 NULL +NULL -57.0 NULL 1473896544 7196 57.0 0.4610526315789474 -20463.0 NULL 7196.0 18 NULL +NULL -27.0 NULL 1473896544 7196 27.0 0.9733333333333334 -9693.0 NULL 7196.0 48 NULL +NULL 18.0 NULL 1473896544 7196 -18.0 -1.46 6462.0 NULL 7196.0 93 NULL +NULL 4.0 NULL 1473896544 7196 -4.0 -6.57 1436.0 NULL 7196.0 79 NULL +NULL 15.0 NULL 1473896544 7196 -15.0 -1.752 5385.0 NULL 7196.0 90 NULL +NULL 47.0 NULL 1473896544 7196 -47.0 -0.5591489361702128 16873.0 NULL 7196.0 122 NULL +NULL -14.0 NULL 1473896544 7196 14.0 1.8771428571428572 -5026.0 NULL 7196.0 
61 NULL +NULL -28.0 NULL 1473896544 7196 28.0 0.9385714285714286 -10052.0 NULL 7196.0 47 NULL +NULL -26.0 NULL 1473896544 7196 26.0 1.0107692307692309 -9334.0 NULL 7196.0 49 NULL +NULL 32.0 NULL 1473896544 7196 -32.0 -0.82125 11488.0 NULL 7196.0 107 NULL +NULL 9.0 NULL 1473896544 7196 -9.0 -2.92 3231.0 NULL 7196.0 84 NULL +NULL 12.0 NULL 1473896544 7196 -12.0 -2.19 4308.0 NULL 7196.0 87 NULL +false 11.0 57WA7Sm6RuEiouyjK3 NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 MQ0fqWv7k48r6kw NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 bq7qevqgOC NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 KoTnkL5820App0hb NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 bGBcSi10VWt NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 PUn1YVC NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 pcnq40qUNuY54 NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 K55mHG1D07 NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 BYD32YqIWlOgNpL NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 6m476JFPvAvlp7KTyU5C NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 T6ubsbx62cmP NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 5tdqo738BN NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 JPh1g4nGHIT0 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 357GvGhVK0325aU NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 5b38BDVq7FrK342c0iI2w26H NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 20ub5m0Qgh NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 2j2W3xc42VkSq4Nh NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 u6aAurTkTTuKL3gU5s6b80SL NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 pPDa1 NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 gSJS1mpb5Khx8140U3 NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 s4q2UkuM0 NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 cv71a87hIMbVuJ2dAX NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 s5unq NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 6PpbCyjf6c88b NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 q4W42sg6k NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +true 11.0 ce6C1MhLw NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 m80sprxq3O4J4YC6gh NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 xbQqalYlo NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 10pO8p1LNx4Y NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 2v5Ux NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 iJloCx17VlmyNl881XJ8187 NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 47INeW44yvsne46Mu NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 FvrWP NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 v6lPjluh77k5 NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 kK8gg NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 5308t82fc4 NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 woeLEb NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 vRRg2BqTsJEV NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 
86 -762 +false 11.0 HgP1PNA6gggV0v0L801 NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 HfAollgq3EG6 NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 Ni0502Nm8 NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 KFSPYD NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 TJ0dMNm6s44r77567jk5 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 bK1Ops664m7u46sIF7Cgn7 NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 GVsdgDhg NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 2SOiwMlQ55T05111LrY5 NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 gk0kJenBW237uQoxGBx36 NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 HN3I58 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 C1E8E3vVL16j NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 6ljwSqpl7n47 NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +true 11.0 bx3NrGJIw088yHD5461A NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 Rdj0Jt0pa8fLFYq24hu3UR NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 8Lh4G52x4 NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 mOofw7T57kng3V161Mg4YYK NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 bM34sI6W5h NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 FMVqyn08R5kuEv8 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 GEO5N1eUca NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 wM316f6NqGIkoP388j3F6 NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 tXve4IPACHEIJ5773oNyco24 NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 qx6dp6KHBQHn7U14fdd0Rbj NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 WML05unAVOf1F5IDw1S1Yv1 NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 RS1Ec5u4hvD NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 316t3Sw NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 8iX3Lj03 NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 70a3Xg NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 LKRvI78ReJ6OGetwpvK NULL NULL -11.0 -2.389090909090909 3949.0 NULL NULL 86 NULL +false 11.0 taaQ17IeHeH4rk2s0HeTKn NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 767fOfF1Oj8fyOv6YFI16rM NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 vu46n3nUvv7ls2K4k18tvw NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 0rtl1C NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 V0w3pYUxg4Pe85bSga6 NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 5Jm0c0pa7 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 1x1vyb NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 28131eU1pSKC35ADujoL NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 7s6O45GD7p4ASq08a26v8 NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 38TsU NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 2Wn3m7QhneidkMX1q NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 U16wryUI NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 Y8QG0P1v36K02sXHc84 NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 RyE4Y3w2gXf NULL NULL 
-11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 4j16o2bV34xFa36 NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 2VBb0ATBqIx4n1Gm7W8 NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 xVIV6kFgqL8r1tcY37o0 NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 7Kp283Fa5 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 8v064ye21c NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 ioU8KlM6LHCw4V86C NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 5kiN628ldFC6 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 ai6nt5l5gCA3p71Q NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 31m1d3P3AD NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 My4DaO425f86c7 NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 rs1jgr3QXsF803w3Eu NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 PLFB86o84end3tdsS2hVL NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 M10C4DWJ0Gn NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 1q3IAyF41KDbkoUH0UF8d NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +true 11.0 yJ67FYA NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 c0gO7g27mjW4XEaUK1fXvEk NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 30S16Yv88FUQsDS2 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 5VexJO NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 4HuS7f55wM87e NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 rKJRy0v1t2MRedVl NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 THog3nx6pd1Bb NULL NULL -11.0 -2.389090909090909 3949.0 NULL NULL 86 NULL +false 11.0 6H2gys6m6qldIy4bENoFI NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 JLB7v50LP4KVsH2or1ih8821 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 68k8JcLTRwf8X2P7nE4X NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 RmHlM NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 181O0OJ0P36g7g37vM2M6 NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 3xN13QA1u4nP NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 26xX874ghxkA8bV NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 7Dl7rr2aa2bfovt1yny5v NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 1Jq7kLUa3loRL NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 BfGE56ef2ej NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 3abOQ1oI NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 0dtVL5IFPf NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 6lqfp6xy7uLrK1oqee NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 0iqrc5 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 ARECS NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 myW247hI5iQQ4U37x5hK NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 57vi3IQLIES0Q16OTuiC4Hf7 NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 FwMw41y68NnU0FGJ5k6 NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +true 11.0 M3qqxj71FawLd2slbwTO0 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 Xr1Lmw7g3730qA0N6n NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 
+false 11.0 BxH575uxOuCE6sxn6frt NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 xhAUptat NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 0OD14f5eu NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 MK45RAOe4Ugk4UJ0B NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 MP277gwYLn NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 SE70BON7C5PmaUdg NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 IorWR NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 p014F NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 2yd00UDPJUO37S4qfT0gHyg NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 ladcLQv2Hj7mc NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 wB06b612o55 NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 0IX8xRUO NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 R4MT4f5U NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 p568R4q2d3342ejH4 NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 Oyt670i0bysk650i2to NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 EQT56g5A73m3j NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 K3Ajb4l11HjWeEEnM02w NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 e035q4Ba4721NL1l NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 8TM0eO67oHDf3spTRmJ8k NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 6D8Kub2t61I80E6Qe8VkYW NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 5kX417RB64367vBw38XVJB44 NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 scPuaL7lo NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 X53h8r5nuFYOY3vop381283 NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 bq2VE4s1Ps NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 gj5IRDNe62057M NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 h6pSh1A3WMOI3eY4IxD NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 7a44BmyY6sULOArK1Jv65nnn NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 6k775i02NM8tHyWkkUSbb8O NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 7q0iMi2GDq0Q NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 Q3k1H7E0N8B0vl22437 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 vQ0a2oe83D2j36d375fkya NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 HtI02nss6t8S0fqH4vcLkCD NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 yOnsF4mFp NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 y4M5U7WAv4eCCp7 NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 We3CdnjxFCPE NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 6a421YV NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 tN335oXx NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 g2vI6MW2 NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 Y55ytQtGRN8l58131e NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 gv7hVe3 NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 02vDyIVT752 NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 er5IUhd505r0lT6sc20Tef5q NULL 
NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 uRcc7 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 W0rvA4H1xn0xMG4uk0 NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 G2s1ly NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 1T1oN5BQ NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 dDf3se3j NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 tIyd6H2oamr52OU50 NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 wb5t2UC67jy84KejtAa0B3 NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 PKyDxRfT7OOR370M1u64Gb4 NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 j51d0i7u3KGhTKavw1C NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 w7rU1B5g1v1Nkit7A2ruWT NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 GHU6et8f3CY NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 n2d32Et NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 aEi5JQHQPd4Y8 NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 2oIGN5REv78NrkB5Id2u NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 B0bp3 NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 43gX6s3LEYUcX668Ig5y NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 PTl81NEYpvuKFBbxAOVh NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 7TSXOfbQHsNGLE NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 PrKs7TD0B7kj847u56pce NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 r3See3oscOt3uwN NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 Mk4tWJvwrb NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 ie83eEmqsGF834r4COpw7j NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 NGPH4Gm5Nq4e4Ub0D4S NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 pAyF06b56PDyJ8PM NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 ti12sx NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 2488b5alBL0PX1 NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 E4ekAO NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 jxkVe1YhhX3 NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 j60Kr2t1K NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 5e8nU8q6vy6hcskp844R8Kt NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 cTWO4kFIrl1n NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 JhS7I21kB6X43NB8U8 NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 M07G7IO4gFx1o NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +true 11.0 1kFnQ8Xw3 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 MpcgmXIn662H8 NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 78WeV1A4Fuo7mPSX NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 kPC4VEoqGJthyOfD1r82GId NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 54GiCgon04NXfnms6b5WRj3W NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 sr70JNPff15hD1sl8D NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 lg62eCuo58RSFPn5Va8va0vp NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 1F1K4Rd NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 
1524 +false 11.0 b NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 oa1p31X62jj14cJ4 NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 NOl00pk86Qix8KT3QA0pva NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 46J0D1L5q4xsdl0 NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 P2o1Lq44s3 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 Y6net7wDJ2TVjq2u7H8aRCyA NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 CEGOy NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 O35aM54x2F07Uq0f NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 Iwu3T706wKyBs33 NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 El5RUByTr1xve1tM NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 YwV7DVLB0kut0S5p NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 ANpel663M NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 5sQ4qB4ML02YI5Jo NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 RlrTc NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 4eFGE3dwF5 NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 kXbBM1GFdKM NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 I12pYjar NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 bvg7bP3mln3ILuC888M5DEF NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 S7ilpQTm4W0w NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 MP6mdTJr380 NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 3fT7I6UC6 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 au3q16lrAbWbHFqF NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 uu20hX NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 41Uxbkbws7x1oN1M5I NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 N3ieX NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 7uEJE7MbCywRC46tr NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 7d4b5KTsS62wJ NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 7jtP3C204M33 NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 yRG7acYwS01a04X7XaW26B NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 etHtCC NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 8dDe31b5 NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 5BO6u6 NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 HkX7hlT2TK0Je7ersfx72o NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 Y5ls7N3Qy30h43866R3cL53 NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 2Mwn2qTjLVk NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 nlVvHbKNkU5I04XtkP6 NULL NULL -11.0 -2.389090909090909 3949.0 NULL NULL 86 NULL +false 11.0 2dj7o NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 KJeFD8m6cR26L NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 n6n772vXEk2CI05PPWhN NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 k3a17i1ndf NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 8ev7c4JiIUUM5R8yV30 NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 24jbgb42dtP NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 
1N77rGXKwbO78axvICg8Gh8 NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 J3HnM2C4sNnO NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 C677g7qo071FQ4a NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 ngUkOdOBOk67o3mcc NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 72F3g4s43q208a2 NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 D2s2711 NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +true 11.0 1j80NSLbNMdIc2H3R01D703 NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 qPiV0J6QDu NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 0mokQ053qtj NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 R426VY66G3alY1rISv8 NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 cre3m4OHF4H4x7nM NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 M22umK0Q1S2Q80358P6 NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 B553840U1H2b1M06l6N81 NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 YjyfU613tjGy NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 1v6A2yY2i NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 NOg4pvkcNV838CleFwsNLnOK NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 77E8Xqg4LgN6ShBGOC4 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 pu2N7if4qfrnK5 NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 v0w25I0uVTf413Rar14 NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 4yAo7t54rr50u6Vci3p NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 2WKo5 NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 lE7AE0Cm NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +true 11.0 nqThW83 NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 KnmtSR55J731b NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 eoIG247 NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 mnfiV3 NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 40CP0hDas6g7m NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 0HxgXxO8E4kP4pBLH8qH NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 2d3tQdCGQN5k7u7S NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 tlH5St NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 651R8MJPy8jvOnu3d NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 0uu4FunxNR7iOvw7NyH7mo NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 B5ObAu54 NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 rqvN5KT0jA11w080At NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 2taQsaEJVXuJ NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 K05HlW2Kgr2Mdwr6 NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 64Vxl8QS NULL NULL -11.0 -2.389090909090909 3949.0 NULL NULL 86 NULL +false 11.0 6oAU0mBFKtwXOIAp7Yqi75H7 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 7Jg216IPQ2H7 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 frhe0 NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 SN5NB5L3gpe2RtR2w50sNAd NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 M7xB374ixGAp NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 
-4572 +false 11.0 2kechLGLtV1b2FK6h NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 A1g358aWFHPT06lWjso8OeQ NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 d8p1NiE467oJer5eVW2DBi NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 R70XMwQQS NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 l1xK7L0L6TjOPrB1tc NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 i7n1eoq1Iw3r5q3qI3464 NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 i0mx8w5HB8THd5N NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 8k1748I2BIW53LK8dmc NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +true 11.0 cUbphr2Or2aJQ0wNK3 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 U1aid52v NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 o8v1574KSnXlsC NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 g6VL0j3k7pEcBq0Hbsk NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 WKH6j0Dtb3VNsOa4uFq2v NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 O8YlG62p5C NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 VbPmiEv5SDp NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 7xh48cBvt34812U1at NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 M5MJdPI5Agcy5T NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 rW58d3yGN1w3XhS7hx3UK1yF NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 2a7V63IL7jK3o NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 ann6ipj6 NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 k7rg3Vw6IpwU6 NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 pL11U1oq48Oj202Wy2W7B NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 7CKu35ao6U121E3o NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 S1Oect6pTauCf8OiYQTgQG0 NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 KIXnc1tg5tx7JUmV14 NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 Q22Upqia NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 SeT3MaHfQ2 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 L64VGc NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 LG13x2kvfvoJ5p4650xdQPo NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 A74OqWUyE2kkH1o0Y NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 FjUt2ol81V3DS18I NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 6SxF1xVO NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 6s3xvhV71f7c6l0Y8 NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 F63t6sNxS3C0yBtcHAUU8 NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 MgMjEMssUEN1 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 y605nF0K3mMoM75j NULL NULL -11.0 -2.389090909090909 3949.0 NULL NULL 86 NULL +false 11.0 xuX0OPw NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 fKbw64QavqgbDL2t60s NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 M3aR2541oGHpP2mTt0d68 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 VFxw08l NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 8Bshk4eu870M3VyJ8c4D1upr NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 b NULL 
NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 bQmm3Sk5f0ib NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 3H10xyM3GNP1 NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 3EdQS NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 WAE3FjRSY77c NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 e5YfpR NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 O56QsHRU7FCsDRCX5Ay2 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 6eeRVS85xD2q6Q8356 NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 5TVADgO1Sm3 NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 OXHevCW4J150lO46s031n NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 w6173j NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 rss1vw14N NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 IViYKd NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 3q00y4llsXx3Ao NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 mE6lh4Kb1O5F8UQ NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 7GeACqY0R NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 h301kgvvRS1JMq4S8dl NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 Nxd2HCv NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 2T6W6I7vsKk3j6Jx6Shkq3 NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 jO055kB85qLIyl5VJVkj8 NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 1H6wGP NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 1V26wN5LmrcPV NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 3afvyfFbo6GH6JS416cesO NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 6F8wR45s5ys8AkrBE17dn2oV NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 5CbP5V2x14qPOqL3J NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 H5alUwndRKm NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 Qc8i8a3TFBT7M4tb1GFhH NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 aGx8GQM1 NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 sx0fwIg8cKq7pu NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 yl7A1QkSCYHui8cwp4b1OW43 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 3mM337C NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 LiFH6M60q NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 nM5TO25VC7BK623 NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 ve4Pgoehe6vhmYVLpP NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 nc1y0EKQ51B4U0F06 NULL NULL -11.0 -2.389090909090909 3949.0 NULL NULL 86 NULL +false 11.0 2sF6Qdn5w5qO805cSaFV NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 UDXHJf5 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 2h2qsp14cr NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 f0QmOLoGtou7gq42fy01Brn NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 YLh18Tir3Ga NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 cp30v1 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 QDK4Rtj7CX01p NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 GPijCx2T8HpOF1dN6 NULL NULL 
-11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 hl4w6g0LGTr2q7740MWXNhi6 NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 K2Hjg3 NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 jctXbMJ5l4ypSx0SMGFSQtF NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 xefguKKDB5IsOAO4uv132 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 7SgB6fRom0PLEjCH1 NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 75OuwM0O3qDy NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 QOev2x2w0723qyqs23d3k28 NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 r3CkPpt24 NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 8Q14Obe1sC82s2s10v44Pb NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 0FEc2M56c3aXrUw885 NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 2QNVLQqPARH24r6rb4 NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 LxX7UfG58X6b2TTCwkEyp6 NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 15cWEp2JVNf8 NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 WQj6R NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 awXW5ct NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 743510L4r5Npy NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 jU6BuS50j NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 hSb1x4 NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 vjtW5U2e1 NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 JUm3vwG65q33 NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 H6UGGj6Bq4n0Dxr NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 6lG12Lw NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858 +false 11.0 033ffm5082ng0V NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 FIVQ8 NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 0w0Kn7n NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 ShA4jlmOwF8u7kjN NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 ASm1a20I155Y NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 l3r8T4QgT63 NULL NULL -11.0 -2.389090909090909 3949.0 NULL NULL 86 NULL +false 11.0 8IkicjRJ21c054Id NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 0Y77KBQmKC14u NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 y6LhmEv NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 pn1RqShxA031bNd NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 2vXyUmN8p0lFrAjL1q3wOB6 NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 570Sgf1L12mIrag2hICI51t NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 Vk2Iv4mbULOS56roWfC3t8wE NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 1gdr1s14ckUm4h0A6Qj NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 MegDovU0eCg3fkXrbtkH NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 btgw707cKS2odwbePK2B NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 6mQ6vL4d NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 K11m3K43m5XFX40RJm1q NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 2BFlmLpq7F1O6 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 
16P2kxk NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 WBCaAb0o2Lsob4aiUHhvDx NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 8E6m0haq3625pJ32EE NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 5FD1Pq2Me0754jnw64jq68 NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 7e8cuG44 NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 RFDIm4Is12 NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 KJBwt NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 t2Hlw6483gjNM4UmOetl44 NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 r1RYHxl1G1um8 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 u8vxgV6DeMarpPIoNRQK8555 NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 iASE7cWnCT4NRf NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 7g83b3nl NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 5ocI6aD NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 L7n644820 NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 YXbTksK2YAt32i4vi6xyT2 NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 l6E3G8 NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 xM1Gglkeqdcp2kE2v6ss5Cb NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 Bgk2cxNJk7f4rMmW38Dl3S1 NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 icCP7UDP0d1h5q NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 K5H5uc6M367aVUqW1QP72smC NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 bJQO0 NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 GCAqH7rTc5Jt1Rie02v NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 KymYC73 NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 c34CVGK345 NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 Xw6nBW1A205Rv7rE NULL NULL -11.0 -2.389090909090909 3949.0 NULL NULL 86 NULL +false 11.0 Hs1UjxW81 NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 fg7BpI NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 370Iao42Ne47KoMuv7L0GKqE NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 uo1oJ7l NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 sOLhNq8p65eoW8e46X12WL NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 P4shXtBlvn NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 41A0nYX72UOSfxO4053xy NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 BLoMwUJ51ns6pd NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 Kst24 NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 b3T1L5u7us8 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 7Xt47WK7fF0OYPUVU3Br2d7M NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 wcBrVnjG NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 1q6mOJMMOOaF1FraYJET8Y NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 y2d583F10vH NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 lEr1qTVVC1tC NULL NULL -11.0 -2.389090909090909 3949.0 0 NULL 86 0 +false 11.0 5E1p5y1HXY82QUbObgeA NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 ka4xX NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 
-5334 +false 11.0 T3qQxO7gFwJNh4Mb3 NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 VugB74M4f31f0 NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 2FBdToh5748vG3p1f4A2Koql NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334 +false 11.0 XBTRwI0J NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334 +false 11.0 6KG7M5SbVWfA8J2wYvDbR NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 V2NEmm6d0kLFGa5s01k NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 g552y0x1B4n NULL NULL -11.0 -2.389090909090909 3949.0 3 NULL 86 2286 +false 11.0 2W5VeOi75DI33He6HWk NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 Mekui5MM6PUU06e NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 6Weo4BXewS0 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 r7O5x3RuAB6v65VR2O71S3f3 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 7hCJ5yJvt0775jjgq8S0bX6W NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 02k5poW73QsWM NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 a7P5omBy NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 7i03i80 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524 +false 11.0 225M5e1OeEOu7v NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 YUKS3r4spEtph1kg7 NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 vN0g7Ptk7aTyTIH1cCt2sX6B NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 E6EfhWpAlcoU2hr NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 3Qm5PpAGbhf8NkWHJPv NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 jL3mXoEuM0B NULL NULL -11.0 -2.389090909090909 3949.0 10 NULL 86 7620 +false 11.0 GDW1pK2834Y NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 M45b3SlE5q5n NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 eicMhR0nJt12OH7IO2651bO NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524 +false 11.0 bX48CaI1txU5AGn2AmEuKj NULL NULL -11.0 -2.389090909090909 3949.0 -8 NULL 86 -6096 +false 11.0 e2m8waBVlVU NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 o1q75 NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 qAoGjP7q7r8p460I3aT5x7o NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 XSv8Ti8c NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 nClXBWi0y0f664ah3 NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762 +false 11.0 o6tgwEK05ls41D2fa NULL NULL -11.0 -2.389090909090909 3949.0 -5 NULL 86 -3810 +false 11.0 6OdmC8H5 NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048 +false 11.0 IBVBmf6H8vCc4n NULL NULL -11.0 -2.389090909090909 3949.0 1 NULL 86 762 +false 11.0 8QWCbCQMIc3bsI7 NULL NULL -11.0 -2.389090909090909 3949.0 -10 NULL 86 -7620 +false 11.0 B7P12uoI NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572 +false 11.0 ijU4c NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858 +false 11.0 B7grxpIo8Tf33RjGTg0 NULL NULL -11.0 -2.389090909090909 3949.0 -3 NULL 86 -2286 +false 11.0 1j3rth56N41X17c1S NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810 +false 11.0 65mIi6OLkWrv1iSiM1wia NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048 +false 11.0 H581dL8J4qjjb1DAPl NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572 +false 11.0 0w036Qnm3WkA73cw142j1l NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096 +false 11.0 
QRQRpg NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048
+false 11.0 5O4amH0XK1mu8716 NULL NULL -11.0 -2.389090909090909 3949.0 -9 NULL 86 -6858
+false 11.0 6K78X NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524
+false 11.0 DBdP640m2jjC NULL NULL -11.0 -2.389090909090909 3949.0 9 NULL 86 6858
+false 11.0 OEfPnHnIYueoup NULL NULL -11.0 -2.389090909090909 3949.0 -7 NULL 86 -5334
+false 11.0 M70kEecXx1706B NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572
+false 11.0 H8PP4887 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524
+false 11.0 mXUG4lHU NULL NULL -11.0 -2.389090909090909 3949.0 -4 NULL 86 -3048
+false 11.0 3rDE5ohocdMweTS7gspnT3 NULL NULL -11.0 -2.389090909090909 3949.0 8 NULL 86 6096
+false 11.0 g8d0MGKWIe2r6wivyyl NULL NULL -11.0 -2.389090909090909 3949.0 -6 NULL 86 -4572
+false 11.0 28Oe6r21yux7Lk47 NULL NULL -11.0 -2.389090909090909 3949.0 -1 NULL 86 -762
+false 11.0 xTlDv24JYv4s NULL NULL -11.0 -2.389090909090909 3949.0 7 NULL 86 5334
+false 11.0 LR2AKy0dPt8vFdIV5760jriw NULL NULL -11.0 -2.389090909090909 3949.0 NULL NULL 86 NULL
+false 11.0 meGb5 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524
+false 11.0 mby00c NULL NULL -11.0 -2.389090909090909 3949.0 6 NULL 86 4572
+false 11.0 nI30tm7U55O0gI NULL NULL -11.0 -2.389090909090909 3949.0 4 NULL 86 3048
+false 11.0 KHtD2A2hp6OjFgS73gdgE NULL NULL -11.0 -2.389090909090909 3949.0 5 NULL 86 3810
+false 11.0 eQ80MW0h728I204P87YXc NULL NULL -11.0 -2.389090909090909 3949.0 -2 NULL 86 -1524
+false 11.0 d3o1712a03n20qvi62U7 NULL NULL -11.0 -2.389090909090909 3949.0 2 NULL 86 1524
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_decimal_date.q.out b/ql/src/test/results/clientpositive/tez/vectorization_decimal_date.q.out
new file mode 100644
index 0000000..c20033c
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vectorization_decimal_date.q.out
@@ -0,0 +1,51 @@
+PREHOOK: query: CREATE TABLE date_decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (CAST (cint AS TIMESTAMP) AS DATE) AS cdate, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal FROM alltypesorc
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: database:default
+PREHOOK: Output: default@date_decimal_test
+POSTHOOK: query: CREATE TABLE date_decimal_test STORED AS ORC AS SELECT cint, cdouble, CAST (CAST (cint AS TIMESTAMP) AS DATE) AS cdate, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal FROM alltypesorc
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@date_decimal_test
+PREHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
+PREHOOK: type: QUERY
+POSTHOOK: query: EXPLAIN SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 10
+      Processor Tree:
+        TableScan
+          alias: date_decimal_test
+          Filter Operator
+            predicate: (cint is not null and cdouble is not null) (type: boolean)
+            Select Operator
+              expressions: cdate (type: date), cdecimal (type: decimal(20,10))
+              outputColumnNames: _col0, _col1
+              Limit
+                Number of rows: 10
+                ListSink
+
+PREHOOK: query: SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@date_decimal_test
+#### A masked pattern was here ####
+POSTHOOK: query: SELECT cdate, cdecimal from date_decimal_test where cint IS NOT NULL AND cdouble IS NOT NULL LIMIT 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@date_decimal_test
+#### A masked pattern was here ####
+1970-01-06 -7959.5837837838
+1970-01-06 -2516.4135135135
+1970-01-06 -9445.0621621622
+1970-01-06 -5713.7459459459
+1970-01-06 8963.6405405405
+1970-01-06 4193.6243243243
+1970-01-06 2964.3864864865
+1970-01-06 -4673.2540540541
+1970-01-06 -9216.8945945946
+1970-01-06 -9287.3756756757
diff --git a/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out b/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out
new file mode 100644
index 0000000..54b09d0
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vectorization_div0.q.out
@@ -0,0 +1,485 @@
+PREHOOK: query: -- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants
+explain
+select cdouble / 0.0 from alltypesorc limit 100
+PREHOOK: type: QUERY
+POSTHOOK: query: -- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants
+explain
+select cdouble / 0.0 from alltypesorc limit 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 100
+      Processor Tree:
+        TableScan
+          alias: alltypesorc
+          Select Operator
+            expressions: (cdouble / 0.0) (type: double)
+            outputColumnNames: _col0
+            Limit
+              Number of rows: 100
+              ListSink
+
+PREHOOK: query: select cdouble / 0.0 from alltypesorc limit 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+POSTHOOK: query: select cdouble / 0.0 from alltypesorc limit 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+#### A masked pattern was here ####
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+NULL
+PREHOOK: query: -- There are no zeros in the table, but there is 988888, so use it as zero
+
+-- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants as numerators
+explain
+select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L)
+from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100
+PREHOOK: type: QUERY
+POSTHOOK: query: -- There are no zeros in the table, but there is 988888, so use it as zero
+
+-- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants as numerators
+explain
+select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L)
+from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1 
+            Map Operator Tree:
+                TableScan
+                  alias: alltypesorc
+                  Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
377237 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((cbigint > 0) and (cbigint < 100000000)) (type: boolean) + Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (cbigint - 988888) (type: bigint), (cdouble / (cbigint - 988888)) (type: double), (1.2 / (cbigint - 988888)) (type: double) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: bigint), _col1 (type: double) + sort order: ++ + Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: double) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint), KEY.reducesinkkey1 (type: double), VALUE._col0 (type: double) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 100 + Statistics: Num rows: 100 Data size: 3000 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 100 Data size: 3000 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: 100 + Processor Tree: + ListSink + +PREHOOK: query: select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L) +from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L) +from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-985319 NULL -1.21787969175465E-6 +-985319 2.0297994862577501E-4 -1.21787969175465E-6 +-63925 0.11256941728588189 -1.8771998435666796E-5 +0 NULL NULL +0 NULL NULL +0 NULL NULL +0 NULL NULL +0 NULL NULL +0 NULL NULL +0 NULL NULL +0 NULL NULL +0 NULL NULL +392309 NULL 3.05881333336732E-6 +673083 -0.010691103474608629 1.7828410463494101E-6 +2331159 NULL 5.147654021025593E-7 +2342037 NULL 5.123744842630582E-7 +3533105 -5.660743170667161E-5 3.3964459024002967E-7 +3768727 0.004139594085748318 3.184099033970887E-7 +4728619 NULL 2.5377388197272816E-7 +5391403 NULL 2.2257657236901044E-7 +7022666 -0.0010246820794268159 1.708752772807364E-7 +7470430 NULL 1.6063332365071354E-7 +8276429 NULL 1.4499006757624573E-7 +8286860 -8.683626850218298E-4 1.44807562816314E-7 +8299981 -8.669899364829872E-4 1.445786442161735E-7 +9247593 NULL 1.297634962957388E-7 +9821695 -7.326637611939691E-4 1.2217850381222386E-7 +10000738 0.001559984873116364 1.1999114465352456E-7 +10081828 0.0015474376273826532 1.190260337708598E-7 +10745355 -6.696847149303117E-4 1.1167616146697805E-7 +11127199 -1.797397530142132E-5 1.0784385180852791E-7 +11722580 NULL 1.023665438836843E-7 +12649396 NULL 9.486618965838368E-8 +13126214 -1.5236685917203544E-5 9.142011550322126E-8 +14042667 NULL 8.545385288991044E-8 +14943972 -1.3383322720358416E-5 8.02999363221505E-8 +16259022 
NULL 7.380517721176587E-8 +16531556 -1.2098074736582569E-5 7.258844841949542E-8 +16596157 NULL 7.230589587697923E-8 +17058489 -1.1724367849930905E-5 7.034620709958544E-8 +17247320 -4.172242412154468E-4 6.957602688417679E-8 +19004427 8.209139901981786E-4 6.314318237534864E-8 +19498517 NULL 6.154314197331007E-8 +20165679 7.736411950224934E-4 5.95070466013071E-8 +20547875 NULL 5.840019953401507E-8 +23264783 NULL 5.158010715165492E-8 +23475527 6.645644206411213E-4 5.111706331448917E-8 +24379905 NULL 4.922086447834805E-8 +24514624 -2.935390728407664E-4 4.895037345871591E-8 +25154198 -2.860755091456305E-4 4.770575472133916E-8 +25245192 -7.922300610745999E-6 4.7533803664475993E-8 +26610943 NULL 4.509423059528556E-8 +27520143 5.668938566198584E-4 4.360442458456702E-8 +27818379 NULL 4.313694913711543E-8 +28400244 NULL 4.225315810666979E-8 +28698999 5.43607810153936E-4 4.18133050563889E-8 +28806400 -6.9429015774272385E-6 4.165740946456343E-8 +29920877 5.214085135271938E-4 4.010577631130264E-8 +33126539 NULL 3.622473207961749E-8 +34603086 NULL 3.467898787986713E-8 +35156265 NULL 3.413331876978399E-8 +35862260 NULL 3.346136021544654E-8 +36123797 -1.992038655294182E-4 3.321909931007529E-8 +36341671 -1.980096072082101E-4 3.301994561559924E-8 +36413215 -5.4925114412446145E-6 3.2955068647467685E-8 +36578596 4.2650625518814335E-4 3.280607052277239E-8 +36796441 -1.955623914823719E-4 3.2611849607955287E-8 +39723587 NULL 3.0208752296211316E-8 +39985709 -1.7996429674411925E-4 3.001072208073139E-8 +40018606 NULL 2.998605198791782E-8 +41003161 NULL 2.9266036342905367E-8 +41158231 3.790493328053871E-4 2.9155772025284565E-8 +41848817 NULL 2.8674645689506587E-8 +44047567 -1.633688416888043E-4 2.724327543448654E-8 +45125678 NULL 2.6592398234991615E-8 +45180154 NULL 2.6560334433565674E-8 +45717793 3.4124569399052136E-4 2.6247986205283355E-8 +46163162 NULL 2.5994753132378583E-8 +46525838 3.353190543284787E-4 2.5792120068852925E-8 +48626663 NULL 2.4677819244968545E-8 +49102701 -1.465499830650864E-4 2.4438574163160596E-8 +50300445 -1.4306036457530346E-4 2.3856647789100076E-8 +50929325 -1.412938420055636E-4 2.356206370298448E-8 +52422534 -1.3726921327381848E-4 2.2890919389741823E-8 +52667422 2.9621727070673783E-4 2.2784483356713376E-8 +52962061 2.945693522010029E-4 2.265772852004381E-8 +53695172 NULL 2.234837798824818E-8 +54760317 NULL 2.1913678841559662E-8 +55020655 2.835480602693661E-4 2.180999117513232E-8 +56102034 NULL 2.1389598815615135E-8 +56131313 NULL 2.13784416551952E-8 +56838351 -3.5187509222426247E-6 2.1112505533455745E-8 +56997841 -3.5089048372902406E-6 2.105342902374144E-8 +57778807 -1.2454393528755274E-4 2.076886080392764E-8 +58080381 NULL 2.0661021490199935E-8 +58307527 NULL 2.058053328174937E-8 +58536385 -1.2293208745295768E-4 2.0500070170031853E-8 +59347745 NULL 2.0219807846111087E-8 +60229567 NULL 1.992376933408802E-8 +60330397 NULL 1.9890470801974003E-8 +PREHOOK: query: -- There are no zeros in the table, but there is -200.0, so use it as zero + +explain +select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) +from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100 +PREHOOK: type: QUERY +POSTHOOK: query: -- There are no zeros in the table, but there is -200.0, so use it as zero + +explain +select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / 
(cdouble + 200.0) +from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((cdouble >= -500) and (cdouble < -199)) (type: boolean) + Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: (cdouble + 200.0) (type: double), (cbigint / (cdouble + 200.0)) (type: double), ((cdouble + 200.0) / (cdouble + 200.0)) (type: double), (3 / (cdouble + 200.0)) (type: double), (1.2 / (cdouble + 200.0)) (type: double) + outputColumnNames: _col0, _col1, _col2, _col4, _col5 + Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: double), _col1 (type: double) + sort order: ++ + Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: double), _col4 (type: double), _col5 (type: double) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double), KEY.reducesinkkey1 (type: double), VALUE._col0 (type: double), KEY.reducesinkkey1 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1365 Data size: 41904 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 100 + Statistics: Num rows: 100 Data size: 3000 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 100 Data size: 3000 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: 100 + Processor Tree: + ListSink + +PREHOOK: query: select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) +from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) +from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-292.0 NULL 1.0 NULL -0.010273972602739725 -0.00410958904109589 +-290.0 NULL 1.0 NULL -0.010344827586206896 -0.004137931034482759 +-289.0 NULL 1.0 NULL -0.010380622837370242 -0.004152249134948096 +-281.0 NULL 1.0 NULL -0.010676156583629894 -0.004270462633451957 +-279.0 NULL 1.0 NULL -0.010752688172043012 -0.004301075268817204 +-274.0 6888911.518248175 1.0 6888911.518248175 -0.010948905109489052 -0.00437956204379562 +-273.0 6028764.868131869 1.0 
6028764.868131869 -0.01098901098901099 -0.004395604395604396 +-257.0 6404096.53307393 1.0 6404096.53307393 -0.011673151750972763 -0.004669260700389105 +-250.0 6583411.236 1.0 6583411.236 -0.012 -0.0048 +-247.0 NULL 1.0 NULL -0.012145748987854251 -0.004858299595141701 +-247.0 -7546669.174089069 1.0 -7546669.174089069 -0.012145748987854251 -0.004858299595141701 +-246.0 NULL 1.0 NULL -0.012195121951219513 -0.004878048780487805 +-237.0 NULL 1.0 NULL -0.012658227848101266 -0.005063291139240506 +-236.0 NULL 1.0 NULL -0.012711864406779662 -0.005084745762711864 +-229.0 7187130.170305677 1.0 7187130.170305677 -0.013100436681222707 -0.005240174672489083 +-228.0 8278779.631578947 1.0 8278779.631578947 -0.013157894736842105 -0.005263157894736842 +-225.0 NULL 1.0 NULL -0.013333333333333334 -0.005333333333333333 +-210.0 -8876320.40952381 1.0 -8876320.40952381 -0.014285714285714285 -0.005714285714285714 +-201.0 NULL 1.0 NULL -0.014925373134328358 -0.005970149253731343 +-199.0 NULL 1.0 NULL -0.01507537688442211 -0.006030150753768844 +-189.0 NULL 1.0 NULL -0.015873015873015872 -0.006349206349206349 +-188.0 NULL 1.0 NULL -0.015957446808510637 -0.006382978723404255 +-184.0 8944852.222826088 1.0 8944852.222826088 -0.016304347826086956 -0.006521739130434782 +-183.0 8993731.196721312 1.0 8993731.196721312 -0.01639344262295082 -0.006557377049180328 +-181.0 NULL 1.0 NULL -0.016574585635359115 -0.0066298342541436465 +-179.0 NULL 1.0 NULL -0.01675977653631285 -0.0067039106145251395 +-169.0 9738774.01775148 1.0 9738774.01775148 -0.01775147928994083 -0.007100591715976331 +-164.0 NULL 1.0 NULL -0.018292682926829267 -0.007317073170731707 +-161.0 NULL 1.0 NULL -0.018633540372670808 -0.007453416149068323 +-154.0 1.2256894519480519E7 1.0 1.2256894519480519E7 -0.01948051948051948 -0.007792207792207792 +-152.0 NULL 1.0 NULL -0.019736842105263157 -0.007894736842105263 +-148.0 NULL 1.0 NULL -0.02027027027027027 -0.008108108108108109 +-140.0 NULL 1.0 NULL -0.02142857142857143 -0.008571428571428572 +-138.0 NULL 1.0 NULL -0.021739130434782608 -0.008695652173913044 +-137.0 NULL 1.0 NULL -0.021897810218978103 -0.00875912408759124 +-132.0 NULL 1.0 NULL -0.022727272727272728 -0.00909090909090909 +-129.0 1.2758548906976745E7 1.0 1.2758548906976745E7 -0.023255813953488372 -0.009302325581395349 +-128.0 NULL 1.0 NULL -0.0234375 -0.009375 +-126.0 NULL 1.0 NULL -0.023809523809523808 -0.009523809523809523 +-126.0 -1.4793867349206349E7 1.0 -1.4793867349206349E7 -0.023809523809523808 -0.009523809523809523 +-116.0 NULL 1.0 NULL -0.02586206896551724 -0.010344827586206896 +-113.0 NULL 1.0 NULL -0.02654867256637168 -0.010619469026548672 +-113.0 -1.6495816690265486E7 1.0 -1.6495816690265486E7 -0.02654867256637168 -0.010619469026548672 +-96.0 NULL 1.0 NULL -0.03125 -0.012499999999999999 +-94.0 -1.9830077510638297E7 1.0 -1.9830077510638297E7 -0.031914893617021274 -0.01276595744680851 +-93.0 NULL 1.0 NULL -0.03225806451612903 -0.012903225806451613 +-77.0 2.4513789038961038E7 1.0 2.4513789038961038E7 -0.03896103896103896 -0.015584415584415584 +-69.0 2.735596747826087E7 1.0 2.735596747826087E7 -0.043478260869565216 -0.017391304347826087 +-62.0 NULL 1.0 NULL -0.04838709677419355 -0.01935483870967742 +-62.0 3.0444544451612905E7 1.0 3.0444544451612905E7 -0.04838709677419355 -0.01935483870967742 +-60.0 NULL 1.0 NULL -0.05 -0.02 +-57.0 -3.27022330877193E7 1.0 -3.27022330877193E7 -0.05263157894736842 -0.021052631578947368 +-49.0 3.35888328367347E7 1.0 3.35888328367347E7 -0.061224489795918366 -0.024489795918367346 +-46.0 3.577940889130435E7 1.0 
3.577940889130435E7 -0.06521739130434782 -0.02608695652173913 +-38.0 4.3311916026315786E7 1.0 4.3311916026315786E7 -0.07894736842105263 -0.031578947368421054 +-28.0 5.878045746428572E7 1.0 5.878045746428572E7 -0.10714285714285714 -0.04285714285714286 +-28.0 6.741291985714285E7 1.0 6.741291985714285E7 -0.10714285714285714 -0.04285714285714286 +-21.0 8.988389314285715E7 1.0 8.988389314285715E7 -0.14285714285714285 -0.05714285714285714 +-20.0 NULL 1.0 NULL -0.15 -0.06 +-17.0 NULL 1.0 NULL -0.17647058823529413 -0.07058823529411765 +-12.0 -1.5533560716666666E8 1.0 -1.5533560716666666E8 -0.25 -0.09999999999999999 +-3.0 NULL 1.0 NULL -1.0 -0.39999999999999997 +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL +0.0 NULL NULL NULL NULL NULL diff --git a/ql/src/test/results/clientpositive/tez/vectorization_nested_udf.q.out b/ql/src/test/results/clientpositive/tez/vectorization_nested_udf.q.out new file mode 100644 index 0000000..bca2d2a --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorization_nested_udf.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: SELECT SUM(abs(ctinyint)) from alltypesorc +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(abs(ctinyint)) from alltypesorc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +261468 diff --git a/ql/src/test/results/clientpositive/tez/vectorization_not.q.out b/ql/src/test/results/clientpositive/tez/vectorization_not.q.out new file mode 100644 index 0000000..b5587ba --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorization_not.q.out @@ -0,0 +1,58 @@ +WARNING: Comparing a bigint and a double may result in a loss of precision. 
+PREHOOK: query: SELECT AVG(cbigint), + (-(AVG(cbigint))), + (-6432 + AVG(cbigint)), + STDDEV_POP(cbigint), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))), + VAR_SAMP(cbigint), + (-((-6432 + AVG(cbigint)))), + (-6432 + (-((-6432 + AVG(cbigint))))), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))), + COUNT(*), + SUM(cfloat), + (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)), + (-(VAR_SAMP(cbigint))), + ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))), + MIN(ctinyint), + (-(MIN(ctinyint))) +FROM alltypesorc +WHERE (((cstring2 LIKE '%b%') + OR ((79.553 != cint) + OR (NOT(cbigint >= cdouble)))) + OR ((ctinyint >= csmallint) + AND (NOT ((cboolean2 != 1) + OR (3569 != ctinyint))))) +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT AVG(cbigint), + (-(AVG(cbigint))), + (-6432 + AVG(cbigint)), + STDDEV_POP(cbigint), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))), + VAR_SAMP(cbigint), + (-((-6432 + AVG(cbigint)))), + (-6432 + (-((-6432 + AVG(cbigint))))), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))), + COUNT(*), + SUM(cfloat), + (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)), + (-(VAR_SAMP(cbigint))), + ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))), + MIN(ctinyint), + (-(MIN(ctinyint))) +FROM alltypesorc +WHERE (((cstring2 LIKE '%b%') + OR ((79.553 != cint) + OR (NOT(cbigint >= cdouble)))) + OR ((ctinyint >= csmallint) + AND (NOT ((cboolean2 != 1) + OR (3569 != ctinyint))))) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-3.875652215945533E8 3.875652215945533E8 -3.875716535945533E8 1.436387455459401E9 3.875716535945533E8 0.0 2.06347151720204902E18 3.875716535945533E8 3.875652215945533E8 3.875716535945533E8 1.0 10934 -37224.52399241924 1.0517370547117279E9 -2.06347151720204902E18 1.5020929380914048E17 -64 64 diff --git a/ql/src/test/results/clientpositive/tez/vectorization_part.q.out b/ql/src/test/results/clientpositive/tez/vectorization_part.q.out new file mode 100644 index 0000000..66facc9 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorization_part.q.out @@ -0,0 +1,72 @@ +PREHOOK: query: CREATE TABLE alltypesorc_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alltypesorc_part +POSTHOOK: query: CREATE TABLE alltypesorc_part(ctinyint tinyint, csmallint smallint, cint int, cbigint bigint, cfloat float, cdouble double, cstring1 string, cstring2 string, ctimestamp1 timestamp, ctimestamp2 timestamp, cboolean1 boolean, cboolean2 boolean) partitioned by (ds string) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alltypesorc_part +PREHOOK: query: insert overwrite table alltypesorc_part partition (ds='2011') select * from alltypesorc limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@alltypesorc_part@ds=2011 +POSTHOOK: query: insert overwrite table alltypesorc_part partition (ds='2011') select * from alltypesorc limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: 
default@alltypesorc_part@ds=2011 +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2011).ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: insert overwrite table alltypesorc_part partition (ds='2012') select * from alltypesorc limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@alltypesorc_part@ds=2012 +POSTHOOK: query: insert overwrite table alltypesorc_part partition (ds='2012') select * from alltypesorc limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@alltypesorc_part@ds=2012 +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cbigint, type:bigint, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).cboolean1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean1, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).cboolean2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cboolean2, type:boolean, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).cstring1 SIMPLE 
[(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).cstring2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring2, type:string, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).ctimestamp1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).ctimestamp2 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp2, type:timestamp, comment:null), ] +POSTHOOK: Lineage: alltypesorc_part PARTITION(ds=2012).ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: select count(cdouble), cint from alltypesorc_part where ds='2011' group by cint limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc_part +PREHOOK: Input: default@alltypesorc_part@ds=2011 +#### A masked pattern was here #### +POSTHOOK: query: select count(cdouble), cint from alltypesorc_part where ds='2011' group by cint limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc_part +POSTHOOK: Input: default@alltypesorc_part@ds=2011 +#### A masked pattern was here #### +100 528534767 +PREHOOK: query: select count(*) from alltypesorc_part A join alltypesorc_part B on A.ds=B.ds +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc_part +PREHOOK: Input: default@alltypesorc_part@ds=2011 +PREHOOK: Input: default@alltypesorc_part@ds=2012 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from alltypesorc_part A join alltypesorc_part B on A.ds=B.ds +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc_part +POSTHOOK: Input: default@alltypesorc_part@ds=2011 +POSTHOOK: Input: default@alltypesorc_part@ds=2012 +#### A masked pattern was here #### +20000 diff --git a/ql/src/test/results/clientpositive/tez/vectorization_pushdown.q.out b/ql/src/test/results/clientpositive/tez/vectorization_pushdown.q.out new file mode 100644 index 0000000..e7b133f --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorization_pushdown.q.out @@ -0,0 +1,74 @@ +WARNING: Comparing a bigint and a double may result in a loss of precision. 
+PREHOOK: query: explain SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble +PREHOOK: type: QUERY +POSTHOOK: query: explain SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc + filterExpr: (cbigint < cdouble) (type: boolean) + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (cbigint < cdouble) (type: boolean) + Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint) + outputColumnNames: cbigint + Statistics: Num rows: 4096 Data size: 125745 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(cbigint) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +WARNING: Comparing a bigint and a double may result in a loss of precision. 
+PREHOOK: query: SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT AVG(cbigint) FROM alltypesorc WHERE cbigint < cdouble +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-1.4670720493864927E9 diff --git a/ql/src/test/results/clientpositive/tez/vectorized_bucketmapjoin1.q.out b/ql/src/test/results/clientpositive/tez/vectorized_bucketmapjoin1.q.out new file mode 100644 index 0000000..e00b7bb --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorized_bucketmapjoin1.q.out @@ -0,0 +1,371 @@ +PREHOOK: query: create table vsmb_bucket_1(key int, value string) + CLUSTERED BY (key) + SORTED BY (key) INTO 1 BUCKETS + STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vsmb_bucket_1 +POSTHOOK: query: create table vsmb_bucket_1(key int, value string) + CLUSTERED BY (key) + SORTED BY (key) INTO 1 BUCKETS + STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vsmb_bucket_1 +PREHOOK: query: create table vsmb_bucket_2(key int, value string) + CLUSTERED BY (key) + SORTED BY (key) INTO 1 BUCKETS + STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vsmb_bucket_2 +POSTHOOK: query: create table vsmb_bucket_2(key int, value string) + CLUSTERED BY (key) + SORTED BY (key) INTO 1 BUCKETS + STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vsmb_bucket_2 +PREHOOK: query: create table vsmb_bucket_RC(key int, value string) + CLUSTERED BY (key) + SORTED BY (key) INTO 1 BUCKETS + STORED AS RCFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vsmb_bucket_RC +POSTHOOK: query: create table vsmb_bucket_RC(key int, value string) + CLUSTERED BY (key) + SORTED BY (key) INTO 1 BUCKETS + STORED AS RCFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vsmb_bucket_RC +PREHOOK: query: create table vsmb_bucket_TXT(key int, value string) + CLUSTERED BY (key) + SORTED BY (key) INTO 1 BUCKETS + STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vsmb_bucket_TXT +POSTHOOK: query: create table vsmb_bucket_TXT(key int, value string) + CLUSTERED BY (key) + SORTED BY (key) INTO 1 BUCKETS + STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vsmb_bucket_TXT +PREHOOK: query: insert into table vsmb_bucket_1 select cint, cstring1 from alltypesorc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@vsmb_bucket_1 +POSTHOOK: query: insert into table vsmb_bucket_1 select cint, cstring1 from alltypesorc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@vsmb_bucket_1 +POSTHOOK: Lineage: vsmb_bucket_1.key SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: vsmb_bucket_1.value SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +PREHOOK: query: insert into table vsmb_bucket_2 select cint, cstring1 from alltypesorc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@vsmb_bucket_2 +POSTHOOK: query: insert into table vsmb_bucket_2 select cint, 
cstring1 from alltypesorc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@vsmb_bucket_2 +POSTHOOK: Lineage: vsmb_bucket_2.key SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: vsmb_bucket_2.value SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +PREHOOK: query: insert into table vsmb_bucket_RC select cint, cstring1 from alltypesorc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@vsmb_bucket_rc +POSTHOOK: query: insert into table vsmb_bucket_RC select cint, cstring1 from alltypesorc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@vsmb_bucket_rc +POSTHOOK: Lineage: vsmb_bucket_rc.key SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: vsmb_bucket_rc.value SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +PREHOOK: query: insert into table vsmb_bucket_TXT select cint, cstring1 from alltypesorc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@vsmb_bucket_txt +POSTHOOK: query: insert into table vsmb_bucket_TXT select cint, cstring1 from alltypesorc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@vsmb_bucket_txt +POSTHOOK: Lineage: vsmb_bucket_txt.key SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: vsmb_bucket_txt.value SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +PREHOOK: query: explain +select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} {VALUE._col0} + 1 {KEY.reducesinkkey0} {VALUE._col0} + outputColumnNames: _col0, _col1, _col5, 
_col6 + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@vsmb_bucket_1 +PREHOOK: Input: default@vsmb_bucket_2 +#### A masked pattern was here #### +POSTHOOK: query: select /*+MAPJOIN(a)*/ * from vsmb_bucket_1 a join vsmb_bucket_2 b on a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vsmb_bucket_1 +POSTHOOK: Input: default@vsmb_bucket_2 +#### A masked pattern was here #### +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +PREHOOK: query: explain +select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 2 Data size: 50 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 25 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 1 Data size: 25 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} {VALUE._col0} + 1 {KEY.reducesinkkey0} {VALUE._col0} + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 114 
Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@vsmb_bucket_1 +PREHOOK: Input: default@vsmb_bucket_rc +#### A masked pattern was here #### +POSTHOOK: query: select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_RC b on a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vsmb_bucket_1 +POSTHOOK: Input: default@vsmb_bucket_rc +#### A masked pattern was here #### +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +PREHOOK: query: -- RC file does not yet provide the vectorized CommonRCFileformat out-of-the-box +-- explain +-- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key; +-- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key; + +explain +select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_TXT b on a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: -- RC file does not yet provide the vectorized CommonRCFileformat out-of-the-box +-- explain +-- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key; +-- select /*+MAPJOIN(b)*/ * from vsmb_bucket_RC a join vsmb_bucket_2 b on a.key = b.key; + +explain +select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_TXT b on a.key = b.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 2 Data size: 52 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 1 Data size: 26 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 2 Data size: 208 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: string) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} {VALUE._col0} + 1 {KEY.reducesinkkey0} {VALUE._col0} + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num 
rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_TXT b on a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@vsmb_bucket_1 +PREHOOK: Input: default@vsmb_bucket_txt +#### A masked pattern was here #### +POSTHOOK: query: select /*+MAPJOIN(b)*/ * from vsmb_bucket_1 a join vsmb_bucket_TXT b on a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vsmb_bucket_1 +POSTHOOK: Input: default@vsmb_bucket_txt +#### A masked pattern was here #### +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p +528534767 cvLH6Eat2yFsyy7p 528534767 cvLH6Eat2yFsyy7p diff --git a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out new file mode 100644 index 0000000..449bdb8 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out @@ -0,0 +1,95 @@ +PREHOOK: query: explain +select + csmallint, + case + when csmallint = 418 then "a" + when csmallint = 12205 then "b" + else "c" + end, + case csmallint + when 418 then "a" + when 12205 then "b" + else "c" + end +from alltypesorc +where csmallint = 418 +or csmallint = 12205 +or csmallint = 10583 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select + csmallint, + case + when csmallint = 418 then "a" + when csmallint = 12205 then "b" + else "c" + end, + case csmallint + when 418 then "a" + when 12205 then "b" + else "c" + end +from alltypesorc +where csmallint = 418 +or csmallint = 12205 +or csmallint = 10583 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: (((csmallint = 418) or (csmallint = 12205)) or (csmallint = 10583)) (type: boolean) + Select Operator + expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string) + outputColumnNames: _col0, _col1, _col2 + ListSink + +PREHOOK: query: select + csmallint, + case + when csmallint = 418 then "a" + when csmallint = 12205 then "b" + else "c" + end, + case csmallint + when 418 then "a" + when 12205 then "b" + else "c" + end +from alltypesorc +where csmallint = 418 +or csmallint = 12205 +or csmallint = 10583 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select + csmallint, + case + when csmallint = 418 then "a" + when csmallint = 12205 then "b" + else "c" + end, + case csmallint + when 418 then "a" + when 12205 then "b" + else "c" + end +from alltypesorc +where 
csmallint = 418 +or csmallint = 12205 +or csmallint = 10583 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +12205 b b +10583 c c +418 a a +12205 b b diff --git a/ql/src/test/results/clientpositive/tez/vectorized_context.q.out b/ql/src/test/results/clientpositive/tez/vectorized_context.q.out new file mode 100644 index 0000000..c7f0562 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorized_context.q.out @@ -0,0 +1,338 @@ +PREHOOK: query: create table store(s_store_sk int, s_city string) +stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@store +POSTHOOK: query: create table store(s_store_sk int, s_city string) +stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@store +PREHOOK: query: insert overwrite table store +select cint, cstring1 +from alltypesorc +where cint not in ( +-3728, -563, 762, 6981, 253665376, 528534767, 626923679) +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@store +POSTHOOK: query: insert overwrite table store +select cint, cstring1 +from alltypesorc +where cint not in ( +-3728, -563, 762, 6981, 253665376, 528534767, 626923679) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@store +POSTHOOK: Lineage: store.s_city SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: store.s_store_sk SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +PREHOOK: query: create table store_sales(ss_store_sk int, ss_hdemo_sk int, ss_net_profit double) +stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@store_sales +POSTHOOK: query: create table store_sales(ss_store_sk int, ss_hdemo_sk int, ss_net_profit double) +stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@store_sales +PREHOOK: query: insert overwrite table store_sales +select cint, cint, cdouble +from alltypesorc +where cint not in ( +-3728, -563, 762, 6981, 253665376, 528534767, 626923679) +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@store_sales +POSTHOOK: query: insert overwrite table store_sales +select cint, cint, cdouble +from alltypesorc +where cint not in ( +-3728, -563, 762, 6981, 253665376, 528534767, 626923679) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@store_sales +POSTHOOK: Lineage: store_sales.ss_hdemo_sk SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: store_sales.ss_net_profit SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: store_sales.ss_store_sk SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +PREHOOK: query: create table household_demographics(hd_demo_sk int) +stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@household_demographics +POSTHOOK: query: create table household_demographics(hd_demo_sk int) +stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@household_demographics +PREHOOK: query: insert overwrite table household_demographics +select cint +from alltypesorc +where cint not in ( +-3728, -563, 762, 6981, 253665376, 528534767, 626923679) 
+PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@household_demographics +POSTHOOK: query: insert overwrite table household_demographics +select cint +from alltypesorc +where cint not in ( +-3728, -563, 762, 6981, 253665376, 528534767, 626923679) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@household_demographics +POSTHOOK: Lineage: household_demographics.hd_demo_sk SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +PREHOOK: query: explain +select store.s_city, ss_net_profit +from store_sales +JOIN store ON store_sales.ss_store_sk = store.s_store_sk +JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk +limit 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select store.s_city, ss_net_profit +from store_sales +JOIN store ON store_sales.ss_store_sk = store.s_store_sk +JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk +limit 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE), Map 3 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: household_demographics + Statistics: Num rows: 6075 Data size: 24300 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: hd_demo_sk is not null (type: boolean) + Statistics: Num rows: 3038 Data size: 12152 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: hd_demo_sk (type: int) + sort order: + + Map-reduce partition columns: hd_demo_sk (type: int) + Statistics: Num rows: 3038 Data size: 12152 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + Map 2 + Map Operator Tree: + TableScan + alias: store + Statistics: Num rows: 6075 Data size: 615632 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: s_store_sk is not null (type: boolean) + Statistics: Num rows: 3038 Data size: 307866 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {ss_hdemo_sk} {ss_net_profit} + 1 {s_city} + keys: + 0 ss_store_sk (type: int) + 1 s_store_sk (type: int) + outputColumnNames: _col1, _col2, _col7 + input vertices: + 0 Map 3 + Statistics: Num rows: 3341 Data size: 338652 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {_col2} {_col7} + 1 + keys: + 0 _col1 (type: int) + 1 hd_demo_sk (type: int) + outputColumnNames: _col2, _col7 + input vertices: + 1 Map 1 + Statistics: Num rows: 3675 Data size: 372517 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col7 (type: string), _col2 (type: double) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 3675 Data size: 372517 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 100 + Statistics: Num rows: 100 Data size: 10100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 100 Data size: 10100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Map 3 + Map Operator Tree: + TableScan + alias: 
store_sales + Statistics: Num rows: 6075 Data size: 72736 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ss_store_sk is not null and ss_hdemo_sk is not null) (type: boolean) + Statistics: Num rows: 1519 Data size: 18186 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ss_store_sk (type: int) + sort order: + + Map-reduce partition columns: ss_store_sk (type: int) + Statistics: Num rows: 1519 Data size: 18186 Basic stats: COMPLETE Column stats: NONE + value expressions: ss_hdemo_sk (type: int), ss_net_profit (type: double) + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: 100 + Processor Tree: + ListSink + +PREHOOK: query: select store.s_city, ss_net_profit +from store_sales +JOIN store ON store_sales.ss_store_sk = store.s_store_sk +JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk +limit 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@household_demographics +PREHOOK: Input: default@store +PREHOOK: Input: default@store_sales +#### A masked pattern was here #### +POSTHOOK: query: select store.s_city, ss_net_profit +from store_sales +JOIN store ON store_sales.ss_store_sk = store.s_store_sk +JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk +limit 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@household_demographics +POSTHOOK: Input: default@store +POSTHOOK: Input: default@store_sales +#### A masked pattern was here #### +LFgU5WT87C2yJ4W4YU0r8Pp NULL +v3p153e2bSkGS70v04G NULL +0pOH7A4O8aQ37NuBqn NULL +8ShAFcD734S8Q26WjMwpq0Q NULL +nOF31ehjY7ULCHMf NULL +t32s57Cjt4a250qQgVNAB5T NULL +nvO822k30OaH37Il NULL +M152O NULL +FgJ7Hft6845s1766oyt82q NULL +0ovL2T NULL +3e27C1jTdTQPdvCWi4if NULL +XWIExC7NI3bqu6VhR14g2 NULL +6g482F6IEbD2mKeLE153e0w NULL +2diFRgr78diK6rSl0J NULL +21UE6fJyy NULL +H3bTj310QaL012cPe NULL +7342q5oFQL8QIl7cO NULL +VkXY4IOSO NULL +4K1nnlkt7786Sq8x0ARXtr NULL +m4eSLx4qihVg1e32 NULL +OSBq0b NULL +aKbAu2WJV8HWHU6K1Ukq NULL +LcfhOxSVg68ACRvw1xC7LU NULL +AwVW3sV2gsM NULL +Tqar00A NULL +mC4mr NULL +YHVB0 NULL +2vtmB0qNlHlGV15P1p NULL +2wbgE0Yo1RX82H2sp4f1l5 NULL +BSmA3fAai62QpNjmL66y8d NULL +314nQ6nVj NULL +H8mh48T7 NULL +U616In80F54RI NULL +BuSLb058f2 NULL +OSc0r NULL +75KN62a2iAf0j5Jol77wH7 NULL +66Mx4v NULL +7SchQY2j74BW7dQNy5G5 NULL +FEefA NULL +P2DNeo00PA7DJF0 NULL +SMXqH NULL +6fB40r75kxeX3k10 NULL +AmYxfSOBdJv8B48l0VAeeI NULL +S87OO NULL +0EIL81O NULL +dG8B5PQ3b85U362G6huu NULL +XOypj8 NULL +61eT82N24 NULL +lVfv3fD1jn532h3K67H NULL +J1an665U NULL +Y6P8Ji868U7u8W3X2GHNiOLh NULL +wXbLC0LS2bFf12f1ljC NULL +j0L50J2e82 NULL +8EPG0Xi307qd NULL +04Y1mA17 NULL +lTLWdPg0yM0IgY76s70 NULL +KDr0tMRnCJJIBA84 NULL +71KN0p4NhE4xm4ixm NULL +u6HT8fTw6IgPf2 NULL +7WYO11kWn6fT2pOlh5sTDIwG NULL +Yc6gaH2OFF7cymt8q23Fr NULL +RQbQ5 NULL +75Y6J NULL +eUx01FREb2LD4kle4dpS NULL +T0Y8Vi41EYW4CpQ6Hg1Xg30w NULL +Egf7KV7TeT NULL +LIJuG07tfqoLu8K NULL +uUTO41xk6VyqYPh NULL +aEvOE7hUNO0d67AM3V7BwUCK NULL +8AqHq NULL +gl03UrAU4bWrOvqwwf NULL +NULL NULL +LX6QHG6sEmBAIbA6e6Am24 NULL +i330V4Y0Lm4ajyKqM1X2Y NULL +64K51WMTs NULL +iW12567av NULL +v3U315C36UQ4oEW NULL +niiH6MSNaSk4fRRb74o1y28c NULL +p4WmTkrM NULL +L1Q62u2 NULL +hnrm68NiEQCL4 NULL +fju0XS06MyUS7Nqk8P8 NULL +0VWukLt NULL +642LsMiNArr0ufitL3l7RCU7 NULL +DWNvg304j4KTMEs2174Cy1 NULL +DU1m68i1Q7W3 NULL +44vcS2S5wu684R05fq01fu NULL +eu3X5Qfp4sHv5H NULL +QbdFB1d7vfaM7 NULL +s43i4lU NULL +0pOTqi3O44rEnGQ NULL +32cB3f NULL +c300w5 NULL +w66f63n NULL +iR76SEs2C4V NULL +ss2PoJAipj6B1tn75O 
NULL
+n3ner11ab4 NULL
+r17jGvc7gR NULL
+5G1Xp277YJRklEO5kHx NULL
+B78T0SnxlCe5AQ522GBUf6c6 NULL
+PREHOOK: query: drop table store
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@store
+PREHOOK: Output: default@store
+POSTHOOK: query: drop table store
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@store
+POSTHOOK: Output: default@store
+PREHOOK: query: drop table store_sales
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@store_sales
+PREHOOK: Output: default@store_sales
+POSTHOOK: query: drop table store_sales
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@store_sales
+POSTHOOK: Output: default@store_sales
+PREHOOK: query: drop table household_demographics
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@household_demographics
+PREHOOK: Output: default@household_demographics
+POSTHOOK: query: drop table household_demographics
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@household_demographics
+POSTHOOK: Output: default@household_demographics
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
new file mode 100644
index 0000000..b0a5bcb
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
@@ -0,0 +1,5450 @@
+PREHOOK: query: select distinct ds from srcpart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select distinct ds from srcpart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+2008-04-08
+2008-04-09
+PREHOOK: query: select distinct hr from srcpart
+PREHOOK: type: QUERY
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+POSTHOOK: query: select distinct hr from srcpart
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+#### A masked pattern was here ####
+11
+12
+PREHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds
+PREHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds
+POSTHOOK: type: CREATETABLE_AS_SELECT
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-4 depends on stages: Stage-2, Stage-0
+  Stage-3 depends on stages: Stage-4
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Tez
+      Edges:
+        Reducer 2 <- Map 1 (SIMPLE_EDGE)
+#### A masked pattern was here ####
+      Vertices:
+        Map 1
+            Map Operator Tree:
+                TableScan
+                  alias: srcpart
+                  Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                  Select Operator
+                    expressions: ds (type: string)
+                    outputColumnNames: ds
+                    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                    Group By Operator
+                      keys: ds (type: string)
+                      mode: hash
+                      outputColumnNames: _col0
+                      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+                      Reduce Output Operator
+                        key expressions: _col0 (type: string)
+                        sort order: +
+                        Map-reduce partition columns: _col0 (type: string)
+                        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
+        Reducer 2
+            Reduce Operator Tree:
+              Group By Operator
+                keys: KEY._col0 (type: string)
+                mode: mergepartial
+                outputColumnNames: _col0
+                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: string), _col0 (type: string)
+                  outputColumnNames: _col0, _col1
+                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.srcpart_date
+            Execution mode: vectorized
+
+  Stage: Stage-2
+    Dependency Collection
+
+  Stage: Stage-4
+      Create Table Operator:
+        Create Table
+          columns: ds string, date string
+          input format: org.apache.hadoop.mapred.TextInputFormat
+          output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat
+          serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+          name: default.srcpart_date
+
+  Stage: Stage-3
+    Stats-Aggr Operator
+
+  Stage: Stage-0
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+PREHOOK: query: create table srcpart_date stored as orc as select ds as ds, ds as date from srcpart group by ds
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcpart_date
+POSTHOOK: query: create table srcpart_date stored as orc as select ds as ds, ds as date from srcpart group by ds
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcpart_date
+PREHOOK: query: create table srcpart_hour stored as orc as select hr as hr, hr as hour from srcpart group by hr
+PREHOOK: type: CREATETABLE_AS_SELECT
+PREHOOK: Input: default@srcpart
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
+PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcpart_hour
+POSTHOOK: query: create table srcpart_hour stored as orc as select hr as hr, hr as hour from srcpart group by hr
+POSTHOOK: type: CREATETABLE_AS_SELECT
+POSTHOOK: Input: default@srcpart
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
+POSTHOOK:
Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart_hour +PREHOOK: query: create table srcpart_date_hour stored as orc as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: database:default +PREHOOK: Output: default@srcpart_date_hour +POSTHOOK: query: create table srcpart_date_hour stored as orc as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart_date_hour +PREHOOK: query: create table srcpart_double_hour stored as orc as select (hr*2) as hr, hr as hour from srcpart group by hr +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: database:default +PREHOOK: Output: default@srcpart_double_hour +POSTHOOK: query: create table srcpart_double_hour stored as orc as select (hr*2) as hr, hr as hour from srcpart group by hr +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart_double_hour +PREHOOK: query: -- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + 
predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from 
srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart 
+POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE) + Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + value expressions: hr (type: string) + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: hr (type: string) + sort order: + + Map-reduce partition columns: hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: hr (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: hr + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Map 6 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + 
Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col2} + 1 + outputColumnNames: _col3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col3 (type: string) + sort order: + + Map-reduce partition columns: _col3 (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' 
and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +500 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE) + Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (ds is not null and hr is not null) (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + value expressions: hr (type: string) + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: hr (type: string) + sort order: + + Map-reduce partition columns: hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 6 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col2} + 1 + outputColumnNames: _col3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col3 (type: string) + sort order: + + Map-reduce partition columns: _col3 (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 2420 Data size: 25709 
Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +500 +PREHOOK: query: select count(*) from srcpart where hr = 11 and ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where hr = 11 and ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: -- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + 
Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Select Operator + expressions: hr (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: hr + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column 
stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +500 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (ds is not null and hr is not null) (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 
+ 1 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +500 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' and hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' and hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: -- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: type: QUERY +POSTHOOK: query: -- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A 
masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +POSTHOOK: query: 
select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +0 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor 
Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +0 +PREHOOK: query: select count(*) from srcpart where ds = 'I DONT EXIST' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = 'I DONT EXIST' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +#### A masked pattern was here #### +0 +PREHOOK: query: -- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(hr) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(hr) (type: double) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: 
_col0 (type: double) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: UDFToDouble(hr) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + 
filterExpr: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: (hr * 2) (type: double) + sort order: + + Map-reduce partition columns: (hr * 2) (type: double) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: hr (type: double) + sort order: + + Map-reduce partition columns: hr (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: hr (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: (hr * 2) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A 
masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(hr) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(hr) (type: double) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File 
Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: (hr * 2) (type: double) + sort order: + + Map-reduce partition columns: (hr * 2) (type: double) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: hr (type: double) + sort order: + + Map-reduce partition columns: hr (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: 
COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: UDFToString((hr * 2)) is not null (type: boolean) + Statistics: Num rows: 
2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToString((hr * 2)) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToString((hr * 2)) (type: string) + sort order: + + Map-reduce partition columns: UDFToString((hr * 2)) (type: string) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (UDFToString(hr) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToString(hr) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: UDFToString(hr) (type: string) + sort order: + + Map-reduce partition columns: UDFToString(hr) (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: UDFToString(hr) (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: UDFToString((hr * 2)) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: 
default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (cast(srcpart.hr*2 as string) = cast(srcpart_double_hour.hr as string)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where cast(hr as string) = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where cast(hr as string) = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 5 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) + Reducer 5 <- Map 4 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Target column: ds + Target Vertex: Map 4 + Map 4 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (ds is not null and (ds = '2008-04-08')) (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ds (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + 
sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Reducer 5 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +POSTHOOK: type: QUERY 
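The pattern throughout this output is that every dynamically pruned query is re-run in a statically prunable form so the row counts can be compared. A minimal sketch of that check for the "parent is reduce tasks" case above, assuming the standard 2000-row srcpart fixture these tests use; the switch that enables the feature (presumably hive.tez.dynamic.partition.pruning) is set earlier in the .q file and is not visible in this hunk:

-- dynamic side: the ds values pruning the second srcpart scan are only known at
-- run time, via the Dynamic Partitioning Event Operator (Target Vertex: Map 4) above
select count(*) from srcpart
join (select ds as ds, ds as date from srcpart group by ds) s
  on (srcpart.ds = s.ds)
where s.date = '2008-04-08';
-- static control: the same count with the partition predicate written directly
select count(*) from srcpart where ds = '2008-04-08';
-- both should return 1000 on this fixture, as the masked output below confirms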
+POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1000 +Warning: Shuffle Join MERGEJOIN[13][tables = [srcpart, srcpart_date_hour]] in Stage 'Reducer 2' is a cross product +PREHOOK: query: -- non-equi join +EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +PREHOOK: type: QUERY +POSTHOOK: query: -- non-equi join +EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + value expressions: ds (type: string), hr (type: string) + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: ((date = '2008-04-08') and (hour = 11)) (type: boolean) + Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((date = '2008-04-08') and (hour = 11)) (type: boolean) + Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 360 Basic stats: COMPLETE Column stats: NONE + value expressions: ds (type: string), hr (type: string) + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col2} {VALUE._col3} + 1 {VALUE._col0} {VALUE._col2} + outputColumnNames: _col2, _col3, _col7, _col9 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((_col2 = _col7) or (_col3 = _col9)) (type: boolean) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +Warning: Shuffle Join MERGEJOIN[13][tables = [srcpart, srcpart_date_hour]] in Stage 'Reducer 2' is a cross product +PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +1500 +PREHOOK: query: -- old style join syntax +EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +PREHOOK: type: QUERY +POSTHOOK: query: -- old style join syntax +EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: 
NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Select Operator + expressions: hr (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: hr + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} + 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} + outputColumnNames: _col2, _col3, _col7, _col9 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((_col2 = _col7) and (_col3 = _col9)) (type: boolean) + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +500 +PREHOOK: query: -- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + value expressions: date (type: string) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Outer Join0 to 1 + condition expressions: + 0 + 1 {VALUE._col0} + outputColumnNames: _col8 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col8 = '2008-04-08') (type: boolean) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY 
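The two left-join plans around this point read best together. In the plan above, srcpart is on the preserved side of the outer join: its unmatched rows must survive, so pruning its scan with ds values from srcpart_date would change the result. Accordingly, Map 4 carries no Dynamic Partitioning Event Operator and the date predicate is only evaluated after the join (predicate: (_col8 = '2008-04-08') in Reducer 2). In the mirrored query whose plan follows, srcpart sits on the null-producing side, so pruning is safe and the event operator reappears, targeting Map 1. A sketch of the two shapes over the same tables:

-- preserved side: unmatched srcpart rows survive, so no dynamic pruning
explain select count(*) from srcpart left join srcpart_date
  on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-- null-producing side: unmatched srcpart rows are dropped anyway; pruning is safe
explain select count(*) from srcpart_date left join srcpart
  on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';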
+POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (date = '2008-04-08') (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (date = '2008-04-08') (type: boolean) + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Outer Join0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- full outer +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where 
srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- full outer +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + value expressions: date (type: string) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Outer Join 0 to 1 + condition expressions: + 0 + 1 {VALUE._col0} + outputColumnNames: _col8 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col8 = '2008-04-08') (type: boolean) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +POSTHOOK: type: QUERY 
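A full outer join preserves both inputs, so the plan just above shows the conservative shape on both sides: no Dynamic Partitioning Event Operator anywhere, with the date filter again evaluated post-join in Reducer 2. The static-pruning case whose plan is printed next combines the two mechanisms: srcpart.hr = 11 is folded into Map 1's filterExpr at compile time (the scan starts from 1000 rows rather than 2000), while ds and hr events from srcpart_date and srcpart_hour prune further at run time. Roughly:

-- static: hr = 11 removes the hr=12 partitions at compile time
-- dynamic: ds from srcpart_date and hr from srcpart_hour arrive as runtime events
explain select count(*) from srcpart
join srcpart_date on (srcpart.ds = srcpart_date.ds)
join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;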
+STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE) + Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (hr = 11) (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + value expressions: hr (type: string) + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: ((hr is not null and (hour = 11)) and (hr = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((hr is not null and (hour = 11)) and (hr = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: '11' (type: string) + sort order: + + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: '11' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: hr + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Map 6 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col2} + 1 + outputColumnNames: _col3 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col3 (type: string) + sort order: + + Map-reduce partition columns: _col3 (type: string) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + 
Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 1210 Data size: 12854 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1210 Data size: 12854 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +500 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 6 (SIMPLE_EDGE) + Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map 5 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr is not null and (hr = 13)) (type: boolean) + Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hr = 13)) (type: boolean) + Statistics: Num rows: 0 
Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: '13' (type: string) + sort order: + + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 6 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: '13' (type: string) + sort order: + + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +0 +PREHOOK: query: -- union + subquery +EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from 
srcpart) +PREHOOK: type: QUERY +POSTHOOK: query: -- union + subquery +EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 4 <- Map 6 (SIMPLE_EDGE), Union 3 (SIMPLE_EDGE) + Reducer 5 <- Reducer 4 (SIMPLE_EDGE) + Reducer 8 <- Map 7 (SIMPLE_EDGE), Union 3 (CONTAINS) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Map 6 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 7 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Target column: ds + Target Vertex: Map 6 + Reducer 4 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 + 1 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + 
mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 5 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Reducer 8 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Target column: ds + Target Vertex: Map 6 + Union 3 + Vertex: Union 3 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +2000 +PREHOOK: query: EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 4 <- Map 6 (SIMPLE_EDGE), Union 3 (SIMPLE_EDGE) + Reducer 5 
<- Reducer 4 (SIMPLE_EDGE) + Reducer 8 <- Map 7 (SIMPLE_EDGE), Union 3 (CONTAINS) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Map 6 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 7 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Target column: ds + Target Vertex: Map 6 + Reducer 4 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} + 1 + outputColumnNames: _col2 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col2 (type: string) + outputColumnNames: _col2 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col2 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reducer 5 + Reduce Operator 
Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Reducer 8 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Target column: ds + Target Vertex: Map 6 + Union 3 + Vertex: Union 3 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +2008-04-08 +2008-04-09 +PREHOOK: query: EXPLAIN select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 11 <- Map 10 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 4 <- Union 3 (SIMPLE_EDGE), Union 7 (SIMPLE_EDGE) + Reducer 6 <- Map 5 (SIMPLE_EDGE), Union 7 (CONTAINS) + Reducer 9 <- Map 8 
(SIMPLE_EDGE), Union 7 (CONTAINS) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Map 10 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Map 5 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ds (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 8 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ds (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reducer 11 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: 
string) + mode: hash + outputColumnNames: _col0 + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Target column: ds + Target Vertex: Map 5 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Target column: ds + Target Vertex: Map 8 + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Target column: ds + Target Vertex: Map 5 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Target column: ds + Target Vertex: Map 8 + Reducer 4 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} + 1 + outputColumnNames: _col0 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 6 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Execution mode: vectorized + Reducer 9 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Execution mode: vectorized + Union 3 + Vertex: Union 3 + Union 7 + Vertex: Union 7 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) 
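[editor's note, not part of the generated q.out] The EXPLAIN plans throughout this hunk all exercise Hive-on-Tez dynamic partition pruning: a small dimension-side scan feeds a Dynamic Partitioning Event Operator, which ships the distinct join-key values to the partitioned srcpart scan so that only the matching ds/hr partitions are read. The qfile's setup DDL is not shown in this hunk; based on the table and column names the queries use (srcpart_date.date, srcpart_hour.hour, srcpart_date_hour, srcpart_double_hour) and the subquery that appears verbatim later in this file (select ds as ds, ds as date from srcpart group by ds), the preamble plausibly looks like the sketch below. The exact statements are assumptions, not copied from this patch; hive.tez.dynamic.partition.pruning is the real HiveConf switch for the feature under test.

-- assumed qfile preamble (not shown in this hunk)
set hive.tez.dynamic.partition.pruning=true;  -- enable the runtime pruning exercised by these plans
create table srcpart_date as select ds as ds, ds as date from srcpart group by ds;
create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr;
create table srcpart_date_hour as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr;
create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr;

Note how the file validates itself: each pruned join is immediately re-run as a plain static filter on srcpart (e.g. where ds = '2008-04-08', or hr = 11), and the matching row counts between the two forms confirm that runtime pruning selected exactly the right partitions.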
+PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select ds from (select distinct(ds) as ds from srcpart union all select distinct(ds) as ds from srcpart) s where s.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +2008-04-08 +2008-04-08 +2008-04-09 +2008-04-09 +PREHOOK: query: -- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- single column, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Execution mode: vectorized + 
Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- multiple sources, single key +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {hr} + 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + outputColumnNames: _col3 + input vertices: + 1 Map 4 + Statistics: Num rows: 2200 Data size: 
23372 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col3 (type: string) + 1 hr (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: hr (type: string) + sort order: + + Map-reduce partition columns: hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: hr (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: hr + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output 
format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +500 +PREHOOK: query: select count(*) from srcpart where hr = 11 and ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where hr = 11 and ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: -- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- multiple columns single source +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 ds (type: string), hr (type: string) + 1 ds (type: string), hr (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: 
NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((ds is not null and hr is not null) and (date = '2008-04-08')) and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), hr (type: string) + sort order: ++ + Map-reduce partition columns: ds (type: string), hr (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Select Operator + expressions: hr (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: hr + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_date_hour +#### A masked pattern was here #### +500 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' and hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' and hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +#### A masked pattern was here #### +500 +PREHOOK: query: -- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: type: QUERY +POSTHOOK: query: -- empty set +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = 'I DONT EXIST')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + 
Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; + +-- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; + +-- expressions +EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 UDFToDouble(hr) (type: double) + 1 UDFToDouble(UDFToInteger((hr / 2))) (type: double) + input vertices: + 1 Map 3 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (UDFToDouble(UDFToInteger((hr / 2))) is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: UDFToDouble(UDFToInteger((hr / 2))) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic 
stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: UDFToDouble(hr) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr * 2) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 (hr * 2) (type: double) + 1 hr (type: double) + input vertices: + 1 Map 3 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 
11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_double_hour + filterExpr: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hour = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: hr (type: double) + sort order: + + Map-reduce partition columns: hr (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: hr (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: (hr * 2) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart_double_hour.hr) where srcpart_double_hour.hour = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Input: default@srcpart_double_hour +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was 
here #### +POSTHOOK: query: select count(*) from srcpart where hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- parent is reduce tasks +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Reducer 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 4 <- Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 ds (type: string) + 1 _col0 (type: string) + input vertices: + 1 Reducer 4 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (ds is not null and (ds = '2008-04-08')) (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: ds (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Reducer 4 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 
500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where ds = '2008-04-08' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: -- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- left join +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + condition expressions: + 0 + 1 {date} + keys: + 0 ds (type: string) + 1 ds (type: string) + outputColumnNames: _col8 + input vertices: + 1 Map 3 + 
Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col8 = '2008-04-08') (type: boolean) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + value expressions: date (type: string) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 2 <- Map 1 (BROADCAST_EDGE) + Reducer 3 <- Map 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 2 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (date = '2008-04-08') (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (date = '2008-04-08') (type: boolean) + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Outer Join0 to 1 + condition expressions: + 0 + 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + input vertices: + 1 Map 1 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 2200 Data size: 23372 Basic stats: 
COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- full outer +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: type: QUERY +POSTHOOK: query: -- full outer +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + value expressions: date (type: string) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Outer Join 0 to 1 + condition expressions: + 0 + 1 {VALUE._col0} + outputColumnNames: _col8 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (_col8 = '2008-04-08') (type: boolean) + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + 
aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +PREHOOK: type: QUERY +POSTHOOK: query: -- with static pruning +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: (hr = 11) (type: boolean) + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {hr} + 1 + keys: + 0 ds (type: string) + 1 ds (type: string) + outputColumnNames: _col3 + input vertices: + 1 Map 4 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 _col3 (type: string) + 1 '11' (type: string) + input vertices: + 1 Map 3 + Statistics: Num rows: 1210 Data size: 12854 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1210 Data size: 12854 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: ((hr is not null and (hour = 11)) and (hr = 11)) (type: boolean) + Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((hr is not null and (hour = 11)) and (hr = 11)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: '11' (type: string) + sort order: + + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: '11' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + 
outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: hr + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart_date +PREHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Input: default@srcpart_hour +#### A masked pattern was here #### +500 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +PREHOOK: type: QUERY +POSTHOOK: query: 
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE), Map 4 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_hour + filterExpr: (hr is not null and (hr = 13)) (type: boolean) + Statistics: Num rows: 2 Data size: 344 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (hr is not null and (hr = 13)) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: '13' (type: string) + sort order: + + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: srcpart_date + filterExpr: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (ds is not null and (date = '2008-04-08')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string) + sort order: + + Map-reduce partition columns: ds (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +-- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; + +-- union + subquery +EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +POSTHOOK: query: -- Disabled until TEZ-1486 is fixed +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +-- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; + +-- union + subquery +EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 4 <- Union 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Union 
3 (CONTAINS) + Reducer 5 <- Map 4 (SIMPLE_EDGE) + Reducer 7 <- Map 6 (SIMPLE_EDGE), Union 3 (CONTAINS) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Map 4 + Map Operator Tree: + TableScan + alias: srcpart + filterExpr: ds is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Left Semi Join 0 to 1 + condition expressions: + 0 {ds} + 1 + keys: + 0 ds (type: string) + 1 _col0 (type: string) + outputColumnNames: _col2 + input vertices: + 1 Union 3 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col2 (type: string) + outputColumnNames: _col2 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col2 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE + Map 6 + Map Operator Tree: + TableScan + alias: srcpart + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: ds + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(ds) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Target column: ds + Target Vertex: Map 4 + Reducer 5 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1100 Data size: 11686 Basic 
stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + Reducer 7 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Filter Operator + predicate: _col0 is not null (type: boolean) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Select Operator + expressions: _col0 (type: string) + outputColumnNames: _col0 + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Dynamic Partitioning Event Operator + Target Input: srcpart + Partition key expr: ds + Target column: ds + Target Vertex: Map 4 + Union 3 + Vertex: Union 3 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +2008-04-08 +2008-04-09 +PREHOOK: query: -- different file format +create table srcpart_orc (key int, value string) partitioned by (ds string, hr int) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcpart_orc +POSTHOOK: query: -- different file format +create table srcpart_orc (key int, value string) partitioned by (ds string, hr int) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcpart_orc +PREHOOK: query: insert into table srcpart_orc partition (ds, hr) select key, value, ds, hr from srcpart +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@srcpart_orc +POSTHOOK: query: insert into table srcpart_orc partition (ds, hr) select key, value, ds, hr from srcpart +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@srcpart_orc@ds=2008-04-08/hr=11 +POSTHOOK: Output: default@srcpart_orc@ds=2008-04-08/hr=12 +POSTHOOK: Output: default@srcpart_orc@ds=2008-04-09/hr=11 +POSTHOOK: Output: default@srcpart_orc@ds=2008-04-09/hr=12 +POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-08,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-08,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=11).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09') +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09') +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Map 1 <- Map 3 (BROADCAST_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: srcpart_orc + filterExpr: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 2000 Data size: 188000 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(hr) is not null (type: boolean) + Statistics: Num rows: 1000 Data size: 94000 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 + keys: + 0 ds (type: string), UDFToDouble(hr) (type: double) + 1 ds (type: string), UDFToDouble(hr) (type: double) + input vertices: + 1 Map 3 + Statistics: Num rows: 1100 Data size: 103400 Basic stats: COMPLETE Column stats: NONE + Select Operator + Statistics: Num rows: 1100 Data size: 103400 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + 
Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: srcpart_date_hour + filterExpr: (((ds is not null and UDFToDouble(hr) is not null) and (hour = 11)) and ((date = '2008-04-08') or (date = '2008-04-09'))) (type: boolean) + Statistics: Num rows: 4 Data size: 1440 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (((ds is not null and UDFToDouble(hr) is not null) and (hour = 11)) and ((date = '2008-04-08') or (date = '2008-04-09'))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: ds (type: string), UDFToDouble(hr) (type: double) + sort order: ++ + Map-reduce partition columns: ds (type: string), UDFToDouble(hr) (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: ds (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart_orc + Partition key expr: ds + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: ds + Target Vertex: Map 1 + Select Operator + expressions: UDFToDouble(hr) (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + keys: _col0 (type: double) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Dynamic Partitioning Event Operator + Target Input: srcpart_orc + Partition key expr: UDFToDouble(hr) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Target column: hr + Target Vertex: Map 1 + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09') +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart_date_hour +PREHOOK: Input: default@srcpart_orc +PREHOOK: Input: default@srcpart_orc@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart_orc@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart_orc@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart_orc@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = 
srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart_date_hour +POSTHOOK: Input: default@srcpart_orc +POSTHOOK: Input: default@srcpart_orc@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart_orc@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart_orc@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart_orc@ds=2008-04-09/hr=12 +#### A masked pattern was here #### +1000 +PREHOOK: query: select count(*) from srcpart where (ds = '2008-04-08' or ds = '2008-04-09') and hr = 11 +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from srcpart where (ds = '2008-04-08' or ds = '2008-04-09') and hr = 11 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +#### A masked pattern was here #### +1000 +PREHOOK: query: drop table srcpart_orc +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@srcpart_orc +PREHOOK: Output: default@srcpart_orc +POSTHOOK: query: drop table srcpart_orc +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@srcpart_orc +POSTHOOK: Output: default@srcpart_orc +PREHOOK: query: drop table srcpart_date +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@srcpart_date +PREHOOK: Output: default@srcpart_date +POSTHOOK: query: drop table srcpart_date +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@srcpart_date +POSTHOOK: Output: default@srcpart_date +PREHOOK: query: drop table srcpart_hour +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@srcpart_hour +PREHOOK: Output: default@srcpart_hour +POSTHOOK: query: drop table srcpart_hour +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@srcpart_hour +POSTHOOK: Output: default@srcpart_hour +PREHOOK: query: drop table srcpart_date_hour +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@srcpart_date_hour +PREHOOK: Output: default@srcpart_date_hour +POSTHOOK: query: drop table srcpart_date_hour +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@srcpart_date_hour +POSTHOOK: Output: default@srcpart_date_hour +PREHOOK: query: drop table srcpart_double_hour +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@srcpart_double_hour +PREHOOK: Output: default@srcpart_double_hour +POSTHOOK: query: drop table srcpart_double_hour +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@srcpart_double_hour +POSTHOOK: Output: default@srcpart_double_hour diff --git a/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out index 7e45d66..5aa9599 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out @@ -50,6 +50,8 @@ STAGE PLANS: 0 cint (type: int) 1 cint (type: int) outputColumnNames: _col2, _col17 + input vertices: + 1 Map 1 Statistics: Num rows: 6758 Data size: 207479 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: int), _col17 (type: int) diff --git a/ql/src/test/results/clientpositive/tez/vectorized_math_funcs.q.out b/ql/src/test/results/clientpositive/tez/vectorized_math_funcs.q.out new file mode 100644 index 0000000..444b9e0 --- /dev/null +++ 
b/ql/src/test/results/clientpositive/tez/vectorized_math_funcs.q.out @@ -0,0 +1,247 @@ +PREHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end. + +explain +select + cdouble + ,Round(cdouble, 2) + ,Floor(cdouble) + ,Ceil(cdouble) + ,Rand() + ,Rand(98007) + ,Exp(ln(cdouble)) + ,Ln(cdouble) + ,Ln(cfloat) + ,Log10(cdouble) + -- Use log2 as a representative function to test all input types. + ,Log2(cdouble) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdouble - 15601.0) + ,Log2(cfloat) + ,Log2(cbigint) + ,Log2(cint) + ,Log2(csmallint) + ,Log2(ctinyint) + ,Log(2.0, cdouble) + ,Pow(log2(cdouble), 2.0) + ,Power(log2(cdouble), 2.0) + ,Sqrt(cdouble) + ,Sqrt(cbigint) + ,Bin(cbigint) + ,Hex(cdouble) + ,Conv(cbigint, 10, 16) + ,Abs(cdouble) + ,Abs(ctinyint) + ,Pmod(cint, 3) + ,Sin(cdouble) + ,Asin(cdouble) + ,Cos(cdouble) + ,ACos(cdouble) + ,Atan(cdouble) + ,Degrees(cdouble) + ,Radians(cdouble) + ,Positive(cdouble) + ,Positive(cbigint) + ,Negative(cdouble) + ,Sign(cdouble) + ,Sign(cbigint) + -- Test nesting + ,cos(-sin(log(cdouble)) + 3.14159) +from alltypesorc +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cfloat) >= -1.0 +PREHOOK: type: QUERY +POSTHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end. + +explain +select + cdouble + ,Round(cdouble, 2) + ,Floor(cdouble) + ,Ceil(cdouble) + ,Rand() + ,Rand(98007) + ,Exp(ln(cdouble)) + ,Ln(cdouble) + ,Ln(cfloat) + ,Log10(cdouble) + -- Use log2 as a representative function to test all input types. + ,Log2(cdouble) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdouble - 15601.0) + ,Log2(cfloat) + ,Log2(cbigint) + ,Log2(cint) + ,Log2(csmallint) + ,Log2(ctinyint) + ,Log(2.0, cdouble) + ,Pow(log2(cdouble), 2.0) + ,Power(log2(cdouble), 2.0) + ,Sqrt(cdouble) + ,Sqrt(cbigint) + ,Bin(cbigint) + ,Hex(cdouble) + ,Conv(cbigint, 10, 16) + ,Abs(cdouble) + ,Abs(ctinyint) + ,Pmod(cint, 3) + ,Sin(cdouble) + ,Asin(cdouble) + ,Cos(cdouble) + ,ACos(cdouble) + ,Atan(cdouble) + ,Degrees(cdouble) + ,Radians(cdouble) + ,Positive(cdouble) + ,Positive(cbigint) + ,Negative(cdouble) + ,Sign(cdouble) + ,Sign(cbigint) + -- Test nesting + ,cos(-sin(log(cdouble)) + 3.14159) +from alltypesorc +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cfloat) >= -1.0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: (((cbigint % 500) = 0) and (sin(cfloat) >= -1.0)) (type: boolean) + Select Operator + expressions: cdouble (type: double), round(cdouble, 2) (type: double), floor(cdouble) (type: bigint), ceil(cdouble) (type: bigint), rand() (type: double), rand(98007) (type: double), exp(ln(cdouble)) (type: double), ln(cdouble) (type: double), ln(cfloat) (type: double), log10(cdouble) (type: double), log2(cdouble) (type: double), log2((cdouble - 15601.0)) (type: double), log2(cfloat) (type: double), log2(cbigint) (type: double), log2(cint) (type: double), log2(csmallint) (type: double), log2(ctinyint) (type: double), log(2.0, cdouble) (type: double), power(log2(cdouble), 2.0) (type: double), power(log2(cdouble), 2.0) (type: double), sqrt(cdouble) (type: double), sqrt(cbigint) (type: double), 
bin(cbigint) (type: string), hex(cdouble) (type: string), conv(cbigint, 10, 16) (type: string), abs(cdouble) (type: double), abs(ctinyint) (type: int), (cint pmod 3) (type: int), sin(cdouble) (type: double), asin(cdouble) (type: double), cos(cdouble) (type: double), acos(cdouble) (type: double), atan(cdouble) (type: double), degrees(cdouble) (type: double), radians(cdouble) (type: double), cdouble (type: double), cbigint (type: bigint), (- cdouble) (type: double), sign(cdouble) (type: double), sign(cbigint) (type: double), cos(((- sin(log(cdouble))) + 3.14159)) (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40 + ListSink + +PREHOOK: query: select + cdouble + ,Round(cdouble, 2) + ,Floor(cdouble) + ,Ceil(cdouble) + -- Omit rand() from runtime test because it's nondeterministic. + -- ,Rand() + ,Rand(98007) + ,Exp(ln(cdouble)) + ,Ln(cdouble) + ,Ln(cfloat) + ,Log10(cdouble) + -- Use log2 as a representative function to test all input types. + ,Log2(cdouble) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdouble - 15601.0) + ,Log2(cfloat) + ,Log2(cbigint) + ,Log2(cint) + ,Log2(csmallint) + ,Log2(ctinyint) + ,Log(2.0, cdouble) + ,Pow(log2(cdouble), 2.0) + ,Power(log2(cdouble), 2.0) + ,Sqrt(cdouble) + ,Sqrt(cbigint) + ,Bin(cbigint) + ,Hex(cdouble) + ,Conv(cbigint, 10, 16) + ,Abs(cdouble) + ,Abs(ctinyint) + ,Pmod(cint, 3) + ,Sin(cdouble) + ,Asin(cdouble) + ,Cos(cdouble) + ,ACos(cdouble) + ,Atan(cdouble) + ,Degrees(cdouble) + ,Radians(cdouble) + ,Positive(cdouble) + ,Positive(cbigint) + ,Negative(cdouble) + ,Sign(cdouble) + ,Sign(cbigint) + -- Test nesting + ,cos(-sin(log(cdouble)) + 3.14159) +from alltypesorc +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cfloat) >= -1.0 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select + cdouble + ,Round(cdouble, 2) + ,Floor(cdouble) + ,Ceil(cdouble) + -- Omit rand() from runtime test because it's nondeterministic. + -- ,Rand() + ,Rand(98007) + ,Exp(ln(cdouble)) + ,Ln(cdouble) + ,Ln(cfloat) + ,Log10(cdouble) + -- Use log2 as a representative function to test all input types. 
+ ,Log2(cdouble) + -- Use 15601.0 to test zero handling, as there are no zeroes in the table + ,Log2(cdouble - 15601.0) + ,Log2(cfloat) + ,Log2(cbigint) + ,Log2(cint) + ,Log2(csmallint) + ,Log2(ctinyint) + ,Log(2.0, cdouble) + ,Pow(log2(cdouble), 2.0) + ,Power(log2(cdouble), 2.0) + ,Sqrt(cdouble) + ,Sqrt(cbigint) + ,Bin(cbigint) + ,Hex(cdouble) + ,Conv(cbigint, 10, 16) + ,Abs(cdouble) + ,Abs(ctinyint) + ,Pmod(cint, 3) + ,Sin(cdouble) + ,Asin(cdouble) + ,Cos(cdouble) + ,ACos(cdouble) + ,Atan(cdouble) + ,Degrees(cdouble) + ,Radians(cdouble) + ,Positive(cdouble) + ,Positive(cbigint) + ,Negative(cdouble) + ,Sign(cdouble) + ,Sign(cbigint) + -- Test nesting + ,cos(-sin(log(cdouble)) + 3.14159) +from alltypesorc +-- limit output to a reasonably small number of rows +where cbigint % 500 = 0 +-- test use of a math function in the WHERE clause +and sin(cfloat) >= -1.0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-200.0 -200.0 -200 -200 0.8199077823142826 NULL NULL NULL NULL NULL NULL NULL 30.57531565116074 NULL NULL NULL NULL NULL NULL NULL 39998.48747140321 1011111010111000011011101011000 2D3230302E30 5F5C3758 200.0 36 NULL 0.8732972972139946 NaN 0.4871876750070059 NaN -1.5657963684609384 -11459.155902616465 -3.490658503988659 -200.0 1599879000 200.0 -1.0 1.0 NULL +15601.0 15601.0 15601 15601 0.38656833237681376 15601.00000000001 9.65509029374725 NULL 4.193152436852078 13.929350886124324 NULL NULL NULL NULL 13.929350886124324 NULL 13.929350886124324 194.02681610877246 194.02681610877246 124.90396310766124 NULL 1111111111111111111111111111111110010001001101101010100000011000 31353630312E30 FFFFFFFF9136A818 15601.0 38 NULL -0.14856570831397706 NaN 0.9889025383288114 NaN 1.5707322283397571 893871.4561835973 272.2888166036353 15601.0 -1858689000 -15601.0 1.0 -1.0 -0.9740573096878733 +15601.0 15601.0 15601 15601 0.41161398527282966 15601.00000000001 9.65509029374725 NULL 4.193152436852078 13.929350886124324 NULL NULL 29.18993673432575 NULL 13.929350886124324 NULL 13.929350886124324 194.02681610877246 194.02681610877246 124.90396310766124 24747.04022706554 100100100000001011101000000000 31353630312E30 2480BA00 15601.0 5 NULL -0.14856570831397706 NaN 0.9889025383288114 NaN 1.5707322283397571 893871.4561835973 272.2888166036353 15601.0 612416000 -15601.0 1.0 1.0 -0.9740573096878733 +15601.0 15601.0 15601 15601 0.37807863784568585 15601.00000000001 9.65509029374725 3.871201010907891 4.193152436852078 13.929350886124324 NULL 5.584962500721157 NULL NULL 13.929350886124324 5.584962500721157 13.929350886124324 194.02681610877246 194.02681610877246 124.90396310766124 NULL 1111111111111111111111111111111111010000100101111100000100011000 31353630312E30 FFFFFFFFD097C118 15601.0 48 NULL -0.14856570831397706 NaN 0.9889025383288114 NaN 1.5707322283397571 893871.4561835973 272.2888166036353 15601.0 -795361000 -15601.0 1.0 -1.0 -0.9740573096878733 +NULL NULL NULL NULL 0.3336458983920575 NULL NULL 2.0794415416798357 NULL NULL NULL 3.0 29.693388204506274 29.58473549442715 NULL 3.0 NULL NULL NULL NULL 29464.580431426475 110011101111110001011111011100 NULL 33BF17DC NULL 8 1 NULL NULL NULL NULL NULL NULL NULL NULL 868161500 NULL NULL 1.0 NULL +NULL NULL NULL NULL 0.8681331660942196 NULL NULL 2.0794415416798357 NULL NULL NULL 3.0 NULL 29.730832334348488 NULL 3.0 NULL NULL NULL NULL NULL 1111111111111111111111111111111110010000111111111000101010111000 NULL FFFFFFFF90FF8AB8 NULL 8 0 NULL NULL NULL NULL NULL NULL NULL NULL -1862301000 NULL NULL -1.0 NULL +-7196.0 -7196.0 
-7196 -7196 0.03951015606275099 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1111111111111111111111111111111110100000010101110101001001110000 2D373139362E30 FFFFFFFFA0575270 7196.0 59 NULL -0.9834787875028149 NaN -0.18102340879563897 NaN -1.5706573607035177 -412300.4293761404 -125.59389297351194 -7196.0 -1604890000 7196.0 -1.0 -1.0 NULL +-7196.0 -7196.0 -7196 -7196 0.9209252022050654 NULL NULL NULL NULL NULL NULL NULL 30.52255693577237 NULL NULL NULL NULL NULL NULL NULL 39273.76987252433 1011011111011111001100101001000 2D373139362E30 5BEF9948 7196.0 21 NULL -0.9834787875028149 NaN -0.18102340879563897 NaN -1.5706573607035177 -412300.4293761404 -125.59389297351194 -7196.0 1542429000 7196.0 -1.0 1.0 NULL +-7196.0 -7196.0 -7196 -7196 0.4533660450429132 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1111111111111111111111111111111110100011011110110101000010110100 2D373139362E30 FFFFFFFFA37B50B4 7196.0 14 NULL -0.9834787875028149 NaN -0.18102340879563897 NaN -1.5706573607035177 -412300.4293761404 -125.59389297351194 -7196.0 -1552199500 7196.0 -1.0 -1.0 NULL +-7196.0 -7196.0 -7196 -7196 0.14567136069921982 NULL NULL 4.07753744390572 NULL NULL NULL 5.882643049361842 NULL NULL NULL 5.882643049361842 NULL NULL NULL NULL NULL 1111111111111111111111111111111110111100001011110011111001111100 2D373139362E30 FFFFFFFFBC2F3E7C 7196.0 59 NULL -0.9834787875028149 NaN -0.18102340879563897 NaN -1.5706573607035177 -412300.4293761404 -125.59389297351194 -7196.0 -1137754500 7196.0 -1.0 -1.0 NULL +-7196.0 -7196.0 -7196 -7196 0.5264452612398715 NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL 1111111111111111111111111111111110010001101110110101111010110100 2D373139362E30 FFFFFFFF91BB5EB4 7196.0 8 NULL -0.9834787875028149 NaN -0.18102340879563897 NaN -1.5706573607035177 -412300.4293761404 -125.59389297351194 -7196.0 -1849991500 7196.0 -1.0 -1.0 NULL +-7196.0 -7196.0 -7196 -7196 0.17837094616515647 NULL NULL 1.6094379124341003 NULL NULL NULL 2.321928094887362 NULL NULL NULL 2.321928094887362 NULL NULL NULL NULL NULL 1111111111111111111111111111111111000011011101110000111100110100 2D373139362E30 FFFFFFFFC3770F34 7196.0 5 NULL -0.9834787875028149 NaN -0.18102340879563897 NaN -1.5706573607035177 -412300.4293761404 -125.59389297351194 -7196.0 -1015607500 7196.0 -1.0 -1.0 NULL +-7196.0 -7196.0 -7196 -7196 0.5456857574763374 NULL NULL NULL NULL NULL NULL NULL 29.62699001935971 NULL NULL NULL NULL NULL NULL NULL 28794.287627930647 110001011010110011101011011000 2D373139362E30 316B3AD8 7196.0 24 NULL -0.9834787875028149 NaN -0.18102340879563897 NaN -1.5706573607035177 -412300.4293761404 -125.59389297351194 -7196.0 829111000 7196.0 -1.0 1.0 NULL +NULL NULL NULL NULL 0.282703740641956 NULL NULL 2.3978952727983707 NULL NULL NULL 3.4594316186372978 30.19990821555368 NULL NULL 3.4594316186372978 NULL NULL NULL NULL 35118.75567271711 1001001100000110001001110011000 NULL 49831398 NULL 11 1 NULL NULL NULL NULL NULL NULL NULL NULL 1233327000 NULL NULL 1.0 NULL diff --git a/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out index e46c1a9..83c4bb8 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out @@ -33,6 +33,8 @@ STAGE PLANS: 0 _col0 (type: smallint) 1 csmallint (type: smallint) outputColumnNames: _col1 + input vertices: + 0 Map 4 
Statistics: Num rows: 6758 Data size: 207479 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: double) @@ -81,6 +83,8 @@ STAGE PLANS: 0 ctinyint (type: tinyint) 1 ctinyint (type: tinyint) outputColumnNames: _col0, _col1, _col5, _col15 + input vertices: + 0 Map 3 Statistics: Num rows: 6758 Data size: 207479 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col0 = _col15) (type: boolean) diff --git a/ql/src/test/results/clientpositive/tez/vectorized_parquet.q.out b/ql/src/test/results/clientpositive/tez/vectorized_parquet.q.out new file mode 100644 index 0000000..0607cea --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorized_parquet.q.out @@ -0,0 +1,328 @@ +PREHOOK: query: create table if not exists alltypes_parquet ( + cint int, + ctinyint tinyint, + csmallint smallint, + cfloat float, + cdouble double, + cstring1 string) stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@alltypes_parquet +POSTHOOK: query: create table if not exists alltypes_parquet ( + cint int, + ctinyint tinyint, + csmallint smallint, + cfloat float, + cdouble double, + cstring1 string) stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@alltypes_parquet +PREHOOK: query: insert overwrite table alltypes_parquet + select cint, + ctinyint, + csmallint, + cfloat, + cdouble, + cstring1 + from alltypesorc +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +PREHOOK: Output: default@alltypes_parquet +POSTHOOK: query: insert overwrite table alltypes_parquet + select cint, + ctinyint, + csmallint, + cfloat, + cdouble, + cstring1 + from alltypesorc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@alltypes_parquet +POSTHOOK: Lineage: alltypes_parquet.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] +POSTHOOK: Lineage: alltypes_parquet.cfloat SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cfloat, type:float, comment:null), ] +POSTHOOK: Lineage: alltypes_parquet.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +POSTHOOK: Lineage: alltypes_parquet.csmallint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:csmallint, type:smallint, comment:null), ] +POSTHOOK: Lineage: alltypes_parquet.cstring1 SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] +POSTHOOK: Lineage: alltypes_parquet.ctinyint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +PREHOOK: query: explain select * + from alltypes_parquet + where cint = 528534767 + limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain select * + from alltypes_parquet + where cint = 528534767 + limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + TableScan + alias: alltypes_parquet + Filter Operator + predicate: (cint = 528534767) (type: boolean) + Select Operator + expressions: 528534767 (type: int), ctinyint (type: tinyint), csmallint (type: smallint), cfloat (type: float), cdouble (type: double), cstring1 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Limit + Number of rows: 10 + ListSink + +PREHOOK: query: select * + from alltypes_parquet + where cint = 528534767 + limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypes_parquet +#### A masked pattern 
was here #### +POSTHOOK: query: select * + from alltypes_parquet + where cint = 528534767 + limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypes_parquet +#### A masked pattern was here #### +528534767 -50 -13326 -50.0 -13326.0 cvLH6Eat2yFsyy7p +528534767 NULL -4213 NULL -4213.0 cvLH6Eat2yFsyy7p +528534767 -28 -15813 -28.0 -15813.0 cvLH6Eat2yFsyy7p +528534767 31 -9566 31.0 -9566.0 cvLH6Eat2yFsyy7p +528534767 -34 15007 -34.0 15007.0 cvLH6Eat2yFsyy7p +528534767 29 7021 29.0 7021.0 cvLH6Eat2yFsyy7p +528534767 31 4963 31.0 4963.0 cvLH6Eat2yFsyy7p +528534767 27 -7824 27.0 -7824.0 cvLH6Eat2yFsyy7p +528534767 -11 -15431 -11.0 -15431.0 cvLH6Eat2yFsyy7p +528534767 61 -15549 61.0 -15549.0 cvLH6Eat2yFsyy7p +PREHOOK: query: explain select ctinyint, + max(cint), + min(csmallint), + count(cstring1), + avg(cfloat), + stddev_pop(cdouble) + from alltypes_parquet + group by ctinyint +PREHOOK: type: QUERY +POSTHOOK: query: explain select ctinyint, + max(cint), + min(csmallint), + count(cstring1), + avg(cfloat), + stddev_pop(cdouble) + from alltypes_parquet + group by ctinyint +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypes_parquet + Statistics: Num rows: 12288 Data size: 73728 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint), cint (type: int), csmallint (type: smallint), cstring1 (type: string), cfloat (type: float), cdouble (type: double) + outputColumnNames: ctinyint, cint, csmallint, cstring1, cfloat, cdouble + Statistics: Num rows: 12288 Data size: 73728 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(cint), min(csmallint), count(cstring1), avg(cfloat), stddev_pop(cdouble) + keys: ctinyint (type: tinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 12288 Data size: 73728 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Map-reduce partition columns: _col0 (type: tinyint) + Statistics: Num rows: 12288 Data size: 73728 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: int), _col2 (type: smallint), _col3 (type: bigint), _col4 (type: struct), _col5 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0), min(VALUE._col1), count(VALUE._col2), avg(VALUE._col3), stddev_pop(VALUE._col4) + keys: KEY._col0 (type: tinyint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 6144 Data size: 36864 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: tinyint), _col1 (type: int), _col2 (type: smallint), _col3 (type: bigint), _col4 (type: double), _col5 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 6144 Data size: 36864 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6144 Data size: 36864 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch 
Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select ctinyint, + max(cint), + min(csmallint), + count(cstring1), + avg(cfloat), + stddev_pop(cdouble) + from alltypes_parquet + group by ctinyint +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypes_parquet +#### A masked pattern was here #### +POSTHOOK: query: select ctinyint, + max(cint), + min(csmallint), + count(cstring1), + avg(cfloat), + stddev_pop(cdouble) + from alltypes_parquet + group by ctinyint +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypes_parquet +#### A masked pattern was here #### +NULL 1073418988 -16379 3115 NULL 305051.4870777435 +-64 626923679 -15920 21 -64.0 9254.456539277186 +-63 626923679 -12516 16 -63.0 9263.605837223322 +-62 626923679 -15992 24 -62.0 9004.593091474135 +-61 626923679 -15142 22 -61.0 9357.236187870849 +-60 626923679 -15792 24 -60.0 9892.656196775464 +-59 626923679 -15789 28 -59.0 9829.790704244733 +-58 626923679 -15169 20 -58.0 9549.096672008198 +-57 626923679 -14893 32 -57.0 8572.083461570477 +-56 626923679 -11999 33 -56.0 9490.842152672341 +-55 626923679 -13381 26 -55.0 9157.562103946742 +-54 626923679 -14815 23 -54.0 9614.154026896626 +-53 626923679 -15445 19 -53.0 9387.739325499799 +-52 626923679 -16369 30 -52.0 8625.06871423408 +-51 1073680599 -15734 1028 -51.0 9531.569305177045 +-50 626923679 -14320 27 -50.0 8548.827748002343 +-49 626923679 -14831 23 -49.0 9894.429191738676 +-48 626923679 -15462 26 -48.0 9913.883371354861 +-47 626923679 -16096 19 -47.0 9011.009178780589 +-46 626923679 -12427 21 -46.0 9182.943188188632 +-45 626923679 -15027 21 -45.0 8567.489593562543 +-44 626923679 -15667 21 -44.0 10334.01810499552 +-43 626923679 -15607 27 -43.0 8715.255026265124 +-42 626923679 -16025 14 -42.0 9692.646755759979 +-41 626923679 -12606 21 -41.0 9034.40949481481 +-40 626923679 -14678 23 -40.0 9883.334986561835 +-39 626923679 -15612 19 -39.0 9765.551806305297 +-38 626923679 -14914 28 -38.0 8767.375358291503 +-37 626923679 -14780 17 -37.0 10368.905538788269 +-36 626923679 -16208 23 -36.0 8773.547684436919 +-35 626923679 -16059 23 -35.0 10136.580492864763 +-34 626923679 -15450 29 -34.0 8708.243526705026 +-33 626923679 -12779 21 -33.0 8854.331159704514 +-32 626923679 -15866 25 -32.0 9535.546396775915 +-31 626923679 -15915 22 -31.0 9187.596784112568 +-30 626923679 -14863 23 -30.0 9193.941914019653 +-29 626923679 -14747 26 -29.0 9052.945656011721 +-28 626923679 -15813 20 -28.0 9616.869413270924 +-27 626923679 -14984 20 -27.0 8465.29660255097 +-26 626923679 -15686 15 -26.0 10874.523900405318 +-25 626923679 -15862 24 -25.0 9778.256724727018 +-24 626923679 -16311 26 -24.0 9386.736402961187 +-23 626923679 -16355 36 -23.345263230173213 9401.831290253447 +-22 626923679 -14701 22 -22.0 8809.230165774987 +-21 626923679 -16017 27 -21.0 9480.349236669877 +-20 626923679 -16126 24 -20.0 9868.92268080106 +-19 626923679 -15935 25 -19.0 9967.22240685782 +-18 626923679 -14863 24 -18.0 9638.430684071413 +-17 626923679 -15922 19 -17.0 9944.104273894172 +-16 626923679 -15154 21 -16.0 8884.207393686478 +-15 626923679 -16036 24 -15.0 9450.506254395024 +-14 626923679 -13884 22 -14.0 10125.818731386042 +-13 626923679 -15446 30 -13.0 8907.942987576693 +-12 626923679 -16373 22 -12.0 10173.15707541171 +-11 626923679 -15659 32 -11.0 10453.738567408038 +-10 626923679 -15384 28 -10.0 8850.451610567823 +-9 626923679 -15329 31 -9.0 8999.391457373968 +-8 626923679 -14678 18 -8.0 9976.831992670684 +-7 626923679 -14584 23 -7.0 9946.605446407746 +-6 626923679 -15980 30 -6.0 10262.829252317424 +-5 626923679 
-15780 24 -5.0 10599.227726422314 +-4 626923679 -16207 21 -4.0 9682.726604102581 +-3 626923679 -13632 16 -3.0 8836.215573422822 +-2 626923679 -16277 20 -2.0 10800.090249507177 +-1 626923679 -15441 36 -1.0486250072717667 8786.246963933321 +0 626923679 -14254 24 0.0 10057.5018088718 +1 626923679 -14610 30 1.0 10016.486277900643 +2 626923679 -16227 25 2.0 10083.276127543355 +3 626923679 -16339 30 3.0 10483.526375885149 +4 626923679 -15999 29 4.0 9516.189702058042 +5 626923679 -16169 31 5.0 11114.001902469323 +6 626923679 -15948 30 6.0 9644.247255286113 +7 626923679 -15839 25 7.0 10077.151640330823 +8 1070764888 -15778 1034 8.0 9562.355155774725 +9 626923679 -13629 25 9.0 10157.217948808622 +10 626923679 -15887 26 10.0 9104.820520135108 +11 1072654057 -14696 1035 11.0 9531.018991371746 +12 626923679 -14642 18 12.0 9696.038286378725 +13 626923679 -14771 26 13.0 8128.265919972384 +14 626923679 -13367 28 14.0 9074.674998750581 +15 626923679 -16339 28 15.0 9770.473400901916 +16 626923679 -14001 26 16.0 10130.883606275334 +17 626923679 -16109 22 16.73235294865627 1353416.3383574807 +18 626923679 -15779 21 18.0 10820.004053788869 +19 626923679 -16049 21 19.0 9423.560227007669 +20 626923679 -15149 21 20.0 11161.893298093504 +21 626923679 -15931 23 21.0 9683.044864861204 +22 626923679 -16280 26 22.0 9693.155720861765 +23 626923679 -15514 24 23.0 8542.419116415425 +24 626923679 -15086 24 24.0 9661.203790645088 +25 626923679 -11349 23 25.0 8888.959012093468 +26 626923679 -14516 29 26.0 9123.125508880432 +27 626923679 -14965 24 27.0 9802.871860196345 +28 626923679 -14455 20 28.0 9283.289383115296 +29 626923679 -15892 16 29.0 9874.046501817154 +30 626923679 -14111 27 30.0 10066.520234676527 +31 626923679 -15960 24 31.0 10427.970184550613 +32 626923679 -14044 24 32.0 8376.464579403413 +33 626923679 -14642 29 40.61776386607777 1304429.5939037625 +34 626923679 -15059 28 34.0 8756.731536033676 +35 626923679 -16153 27 35.0 10351.008404963042 +36 626923679 -15912 20 36.0 9475.257975138164 +37 626923679 -12081 24 37.0 9017.860034890362 +38 626923679 -15248 29 38.0 9900.256257785535 +39 626923679 -14887 28 39.0 10513.343644635232 +40 626923679 -15861 22 40.0 9283.318678549174 +41 626923679 -13480 21 41.0 9016.291129937847 +42 626923679 -15834 28 42.0 10318.01399719996 +43 626923679 -15703 28 43.0 8757.796089055722 +44 626923679 -11185 16 44.0 9425.076634933797 +45 626923679 -15228 18 45.0 9459.968668643689 +46 626923679 -15187 22 46.0 9685.908173160062 +47 626923679 -16324 22 47.0 9822.220821743611 +48 626923679 -16372 29 48.0 10079.286173063345 +49 626923679 -15923 27 49.0 9850.111848934683 +50 626923679 -16236 21 50.0 9398.176197406601 +51 626923679 -15790 17 51.0 9220.075799194028 +52 626923679 -15450 20 52.0 9261.723648435052 +53 626923679 -16217 30 53.0 9895.247408969733 +54 626923679 -15245 16 54.0 9789.50878424882 +55 626923679 -15887 21 55.0 9826.38569192808 +56 626923679 -12631 21 56.0 8860.917133763547 +57 626923679 -15620 25 57.0 9413.99393840875 +58 626923679 -13627 20 58.0 9083.529665947459 +59 626923679 -16076 17 59.0 10117.44967077967 +60 626923679 -13606 23 60.0 8346.267436552042 +61 626923679 -15894 29 61.0 8785.714950987198 +62 626923679 -14307 17 62.0 9491.752726667326 diff --git a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out index ad4ac4e..9473714 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out @@ -650,15 +650,16 @@ STAGE PLANS: 
/part [p2] Execution mode: vectorized Reducer 2 - Needs Tagging: true + Needs Tagging: false Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 1 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + Position of Big Table: 0 Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2072,15 +2073,16 @@ STAGE PLANS: Truncated Path -> Alias: /part [part] Reducer 2 - Needs Tagging: true + Needs Tagging: false Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 1 outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + Position of Big Table: 0 Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string) @@ -2385,15 +2387,16 @@ STAGE PLANS: Truncated Path -> Alias: /part [part] Reducer 2 - Needs Tagging: true + Needs Tagging: false Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: 0 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} outputColumnNames: _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 + Position of Big Table: 0 Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) @@ -4266,15 +4269,16 @@ STAGE PLANS: Truncated Path -> Alias: /part [part] Reducer 2 - Needs Tagging: true + Needs Tagging: false Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {VALUE._col6} 1 outputColumnNames: _col1, _col2, _col5, _col7 + Position of Big Table: 0 Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col2 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/vectorized_rcfile_columnar.q.out b/ql/src/test/results/clientpositive/tez/vectorized_rcfile_columnar.q.out new file mode 100644 index 0000000..ee8959b --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorized_rcfile_columnar.q.out @@ -0,0 +1,62 @@ +PREHOOK: query: --This query must pass even when vectorized reader is not available for +--RC files. The query must fall back to the non-vector mode and run successfully. 
+ +CREATE table columnTable (key STRING, value STRING) +ROW FORMAT SERDE + 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +STORED AS + INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' + OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@columnTable +POSTHOOK: query: --This query must pass even when vectorized reader is not available for +--RC files. The query must fall back to the non-vector mode and run successfully. + +CREATE table columnTable (key STRING, value STRING) +ROW FORMAT SERDE + 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +STORED AS + INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' + OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@columnTable +PREHOOK: query: FROM src +INSERT OVERWRITE TABLE columnTable SELECT src.key, src.value LIMIT 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@columntable +POSTHOOK: query: FROM src +INSERT OVERWRITE TABLE columnTable SELECT src.key, src.value LIMIT 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@columntable +POSTHOOK: Lineage: columntable.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: columntable.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: describe columnTable +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@columntable +POSTHOOK: query: describe columnTable +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@columntable +key string +value string +PREHOOK: query: SELECT key, value FROM columnTable ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@columntable +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, value FROM columnTable ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@columntable +#### A masked pattern was here #### +165 val_165 +238 val_238 +255 val_255 +27 val_27 +278 val_278 +311 val_311 +409 val_409 +484 val_484 +86 val_86 +98 val_98 diff --git a/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out index e69c90e..d65e6c0 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out @@ -48,7 +48,7 @@ STAGE PLANS: Execution mode: vectorized Reducer 2 Reduce Operator Tree: - Join Operator + Merge Join Operator condition map: Inner Join 0 to 1 condition expressions: diff --git a/ql/src/test/results/clientpositive/tez/vectorized_string_funcs.q.out b/ql/src/test/results/clientpositive/tez/vectorized_string_funcs.q.out new file mode 100644 index 0000000..0463d31 --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/vectorized_string_funcs.q.out @@ -0,0 +1,123 @@ +PREHOOK: query: -- Test string functions in vectorized mode to verify end-to-end functionality. + +explain +select + substr(cstring1, 1, 2) + ,substr(cstring1, 2) + ,lower(cstring1) + ,upper(cstring1) + ,ucase(cstring1) + ,length(cstring1) + ,trim(cstring1) + ,ltrim(cstring1) + ,rtrim(cstring1) + ,concat(cstring1, cstring2) + ,concat('>', cstring1) + ,concat(cstring1, '<') + ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) +from alltypesorc +-- Limit the number of rows of output to a reasonable amount. 
+where cbigint % 237 = 0 +-- Test function use in the WHERE clause. +and length(substr(cstring1, 1, 2)) <= 2 +and cstring1 like '%' +PREHOOK: type: QUERY +POSTHOOK: query: -- Test string functions in vectorized mode to verify end-to-end functionality. + +explain +select + substr(cstring1, 1, 2) + ,substr(cstring1, 2) + ,lower(cstring1) + ,upper(cstring1) + ,ucase(cstring1) + ,length(cstring1) + ,trim(cstring1) + ,ltrim(cstring1) + ,rtrim(cstring1) + ,concat(cstring1, cstring2) + ,concat('>', cstring1) + ,concat(cstring1, '<') + ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) +from alltypesorc +-- Limit the number of rows of output to a reasonable amount. +where cbigint % 237 = 0 +-- Test function use in the WHERE clause. +and length(substr(cstring1, 1, 2)) <= 2 +and cstring1 like '%' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: alltypesorc + Filter Operator + predicate: ((((cbigint % 237) = 0) and (length(substr(cstring1, 1, 2)) <= 2)) and (cstring1 like '%')) (type: boolean) + Select Operator + expressions: substr(cstring1, 1, 2) (type: string), substr(cstring1, 2) (type: string), lower(cstring1) (type: string), upper(cstring1) (type: string), upper(cstring1) (type: string), length(cstring1) (type: int), trim(cstring1) (type: string), ltrim(cstring1) (type: string), rtrim(cstring1) (type: string), concat(cstring1, cstring2) (type: string), concat('>', cstring1) (type: string), concat(cstring1, '<') (type: string), concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + ListSink + +PREHOOK: query: select + substr(cstring1, 1, 2) + ,substr(cstring1, 2) + ,lower(cstring1) + ,upper(cstring1) + ,ucase(cstring1) + ,length(cstring1) + ,trim(cstring1) + ,ltrim(cstring1) + ,rtrim(cstring1) + ,concat(cstring1, cstring2) + ,concat('>', cstring1) + ,concat(cstring1, '<') + ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) +from alltypesorc +-- Limit the number of rows of output to a reasonable amount. +where cbigint % 237 = 0 +-- Test function use in the WHERE clause. +and length(substr(cstring1, 1, 2)) <= 2 +and cstring1 like '%' +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: select + substr(cstring1, 1, 2) + ,substr(cstring1, 2) + ,lower(cstring1) + ,upper(cstring1) + ,ucase(cstring1) + ,length(cstring1) + ,trim(cstring1) + ,ltrim(cstring1) + ,rtrim(cstring1) + ,concat(cstring1, cstring2) + ,concat('>', cstring1) + ,concat(cstring1, '<') + ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) +from alltypesorc +-- Limit the number of rows of output to a reasonable amount. +where cbigint % 237 = 0 +-- Test function use in the WHERE clause. 
+and length(substr(cstring1, 1, 2)) <= 2 +and cstring1 like '%' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +Vi iqXS6s88N1yr14lj7I viqxs6s88n1yr14lj7i VIQXS6S88N1YR14LJ7I VIQXS6S88N1YR14LJ7I 19 ViqXS6s88N1yr14lj7I ViqXS6s88N1yr14lj7I ViqXS6s88N1yr14lj7I ViqXS6s88N1yr14lj7ITh638b67kn8o >ViqXS6s88N1yr14lj7I ViqXS6s88N1yr14lj7I< ViTh +R4 4e7Gf r4e7gf R4E7GF R4E7GF 6 R4e7Gf R4e7Gf R4e7Gf R4e7GfPTBh56R3LS7L13sB4 >R4e7Gf R4e7Gf< R4PT +3g gubGh4J18TV 3gubgh4j18tv 3GUBGH4J18TV 3GUBGH4J18TV 12 3gubGh4J18TV 3gubGh4J18TV 3gubGh4J18TV 3gubGh4J18TVpJucOe4dN4R5XURJW8 >3gubGh4J18TV 3gubGh4J18TV< 3gpJ +EP PCRx8ObNv51rOF epcrx8obnv51rof EPCRX8OBNV51ROF EPCRX8OBNV51ROF 15 EPCRx8ObNv51rOF EPCRx8ObNv51rOF EPCRx8ObNv51rOF EPCRx8ObNv51rOFysaU2Xm11f715L0I35rut2 >EPCRx8ObNv51rOF EPCRx8ObNv51rOF< EPys +8e eiti74gc5m01xyMKSjUIx 8eiti74gc5m01xymksjuix 8EITI74GC5M01XYMKSJUIX 8EITI74GC5M01XYMKSJUIX 22 8eiti74gc5m01xyMKSjUIx 8eiti74gc5m01xyMKSjUIx 8eiti74gc5m01xyMKSjUIx 8eiti74gc5m01xyMKSjUIxI8x87Fm1J4hE8g4CWNo >8eiti74gc5m01xyMKSjUIx 8eiti74gc5m01xyMKSjUIx< 8eI8 +m0 0hbv1516qk8 m0hbv1516qk8 M0HBV1516QK8 M0HBV1516QK8 12 m0hbv1516qk8 m0hbv1516qk8 m0hbv1516qk8 m0hbv1516qk8N8i3sxF54C4x5h0 >m0hbv1516qk8 m0hbv1516qk8< m0N8 +uT T5e2 ut5e2 UT5E2 UT5E2 5 uT5e2 uT5e2 uT5e2 uT5e2SJp57VKYsDtA2r1Xb2H >uT5e2 uT5e2< uTSJ +l3 35W8012cM77E227Ts l35w8012cm77e227ts L35W8012CM77E227TS L35W8012CM77E227TS 18 l35W8012cM77E227Ts l35W8012cM77E227Ts l35W8012cM77E227Ts l35W8012cM77E227TsMH38bE >l35W8012cM77E227Ts l35W8012cM77E227Ts< l3MH +o1 1uPH5EflET5ts1RjSB74 o1uph5eflet5ts1rjsb74 O1UPH5EFLET5TS1RJSB74 O1UPH5EFLET5TS1RJSB74 21 o1uPH5EflET5ts1RjSB74 o1uPH5EflET5ts1RjSB74 o1uPH5EflET5ts1RjSB74 o1uPH5EflET5ts1RjSB74a1U3DRA788kW7I0UTF203 >o1uPH5EflET5ts1RjSB74 o1uPH5EflET5ts1RjSB74< o1a1 +Ix x8dXlDbC3S44L1FQJqpwa ix8dxldbc3s44l1fqjqpwa IX8DXLDBC3S44L1FQJQPWA IX8DXLDBC3S44L1FQJQPWA 22 Ix8dXlDbC3S44L1FQJqpwa Ix8dXlDbC3S44L1FQJqpwa Ix8dXlDbC3S44L1FQJqpwa Ix8dXlDbC3S44L1FQJqpwa8wQR4X28CiccBVXGqPL7 >Ix8dXlDbC3S44L1FQJqpwa Ix8dXlDbC3S44L1FQJqpwa< Ix8w +OT Tn0Dj2HiBi05Baq1Xt otn0dj2hibi05baq1xt OTN0DJ2HIBI05BAQ1XT OTN0DJ2HIBI05BAQ1XT 19 OTn0Dj2HiBi05Baq1Xt OTn0Dj2HiBi05Baq1Xt OTn0Dj2HiBi05Baq1Xt OTn0Dj2HiBi05Baq1XtAoQ21J1lQ27kYSmfA >OTn0Dj2HiBi05Baq1Xt OTn0Dj2HiBi05Baq1Xt< OTAo +a0 0P3sn1ihxJCsTLDb a0p3sn1ihxjcstldb A0P3SN1IHXJCSTLDB A0P3SN1IHXJCSTLDB 17 a0P3sn1ihxJCsTLDb a0P3sn1ihxJCsTLDb a0P3sn1ihxJCsTLDb a0P3sn1ihxJCsTLDbfT4Jlw38k8kmd6Dt1wv >a0P3sn1ihxJCsTLDb a0P3sn1ihxJCsTLDb< a0fT diff --git a/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out index b8e46e9..b5b74fb 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_timestamp_funcs.q.out @@ -1,10 +1,16 @@ PREHOOK: query: -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. +-- Turning on vectorization has been temporarily moved after filling the test table +-- due to bug HIVE-8197. + CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@alltypesorc_string POSTHOOK: query: -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. +-- Turning on vectorization has been temporarily moved after filling the test table +-- due to bug HIVE-8197. 
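-- [editorial note] A hedged sketch of the ordering this comment describes, using
-- the table defined just below: populate it first and enable vectorization only
-- afterwards, so the load itself runs in row mode while HIVE-8197 is unresolved.
-- The INSERT is illustrative, not the test's exact statement:

INSERT OVERWRITE TABLE alltypesorc_string
  SELECT ctimestamp1, CAST(ctimestamp1 AS STRING) FROM alltypesorc LIMIT 40;  -- illustrative load
SET hive.vectorized.execution.enabled = true;  -- switched on only after the table is filled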
+ CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC POSTHOOK: type: CREATETABLE @@ -169,45 +175,45 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc_string #### A masked pattern was here #### NULL NULL NULL NULL NULL NULL NULL NULL NULL +28784 1969 12 31 31 1 23 59 44 +28784 1969 12 31 31 1 23 59 44 +28784 1969 12 31 31 1 23 59 44 +28784 1969 12 31 31 1 23 59 44 +28785 1969 12 31 31 1 23 59 45 28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 -28786 1969 12 31 31 1 23 59 46 +28787 1969 12 31 31 1 23 59 47 +28788 1969 12 31 31 1 23 59 48 +28789 1969 12 31 31 1 23 59 49 +28789 1969 12 31 31 1 23 59 49 +28790 1969 12 31 31 1 23 59 50 +28792 1969 12 31 31 1 23 59 52 +28792 1969 12 31 31 1 23 59 52 +28792 1969 12 31 31 1 23 59 52 +28792 1969 12 31 31 1 23 59 52 +28795 1969 12 31 31 1 23 59 55 +28795 1969 12 31 31 1 23 59 55 +28795 1969 12 31 31 1 23 59 55 +28798 1969 12 31 31 1 23 59 58 +28798 1969 12 31 31 1 23 59 58 +28800 1970 1 1 1 1 0 0 0 +28800 1970 1 1 1 1 0 0 0 +28802 1970 1 1 1 1 0 0 2 +28803 1970 1 1 1 1 0 0 3 +28804 1970 1 1 1 1 0 0 4 +28804 1970 1 1 1 1 0 0 4 +28805 1970 1 1 1 1 0 0 5 +28805 1970 1 1 1 1 0 0 5 +28806 1970 1 1 1 1 0 0 6 +28807 1970 1 1 1 1 0 0 7 +28807 1970 1 1 1 1 0 0 7 +28807 1970 1 1 1 1 0 0 7 +28808 1970 1 1 1 1 0 0 8 +28808 1970 1 1 1 1 0 0 8 +28809 1970 1 1 1 1 0 0 9 +28811 1970 1 1 1 1 0 0 11 +28813 1970 1 1 1 1 0 0 13 +28814 1970 1 1 1 1 0 0 14 +28815 1970 1 1 1 1 0 0 15 PREHOOK: query: EXPLAIN SELECT to_unix_timestamp(stimestamp1) AS c1, year(stimestamp1), @@ -457,44 +463,44 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc_string #### A masked pattern was here #### NULL NULL NULL NULL NULL NULL NULL NULL NULL -false false false false false true false false false -false true true true true true true true false -false true true true true true true true false -false true true true true true true true false -false false false false false true false false false -false false false false false true false false false -false false false false false true false false false -false true true true true true true true false -false true true true true true true true false -false true true true true true true true false -false false false false false true false false false -false false false false false true false false false -false false false false false true false 
false false -false true true true true true true true false -false false false false false true false false false -false true true true true true true true false -false false false false false true false false false -false false false false false true false false false -false true true true true true true true false -false true true true true true true true false -false false false false false true false false false -false false false false false true false false false -false false false false false true false false false -false false false false false true false false false -false true true true true true true true false -false true true true true true true true false -false true true true true true true true false -false false false false false true false false false -false true true true true true true true false -false false false false false true false false false -false true true true true true true true false -false false false false false true false false false -false true true true true true true true false -false true true true true true true true false -false true true true true true true true false -false false false false false true false false false -false false false false false true false false false -false true true true true true true true false +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true +true true true true true true true true true true true true true true true true true true PREHOOK: query: -- Wrong format. Should all be NULL. 
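-- [editorial note] A small self-contained illustration of the property asserted
-- above: Hive's timestamp extraction UDFs return NULL when the input string does
-- not parse. alltypesorc_wrong is the table queried below; the literal in the
-- second statement is made up:

SELECT to_unix_timestamp(stimestamp1), year(stimestamp1), month(stimestamp1)
FROM alltypesorc_wrong;          -- expected: NULL in every column
SELECT year('not-a-timestamp');  -- likewise NULL, same parsing rule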
EXPLAIN SELECT @@ -539,15 +545,15 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_wrong - Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: to_unix_timestamp(stimestamp1) (type: bigint), year(stimestamp1) (type: int), month(stimestamp1) (type: int), day(stimestamp1) (type: int), dayofmonth(stimestamp1) (type: int), weekofyear(stimestamp1) (type: int), hour(stimestamp1) (type: int), minute(stimestamp1) (type: int), second(stimestamp1) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + - Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int) Execution mode: vectorized Reducer 2 @@ -555,10 +561,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -604,3 +610,274 @@ POSTHOOK: Input: default@alltypesorc_wrong NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +PREHOOK: query: EXPLAIN SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc_string + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctimestamp1 (type: timestamp) + outputColumnNames: ctimestamp1 + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ctimestamp1), max(ctimestamp1), count(ctimestamp1), count() + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 
96 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +POSTHOOK: query: SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +1969-12-31 23:59:44.088 1970-01-01 00:00:15.007 39 40 +PREHOOK: query: -- SUM of timestamps is not vectorized reduce-side because they produce a double instead of a long (HIVE-8211)... +EXPLAIN SELECT + sum(ctimestamp1) +FROM alltypesorc_string +PREHOOK: type: QUERY +POSTHOOK: query: -- SUM of timestamps is not vectorized reduce-side because they produce a double instead of a long (HIVE-8211)...
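-- [editorial note] A hedged illustration of why Reducer 2 in the plan below has
-- no "Execution mode: vectorized" marker: SUM treats each timestamp as seconds
-- since the epoch with a fractional part, so the aggregate value is a DOUBLE
-- (see "_col0 (type: double)" in the plan), which the vectorized reduce side
-- could not yet handle (HIVE-8211). A roughly equivalent explicit form:

SELECT sum(CAST(ctimestamp1 AS DOUBLE)) FROM alltypesorc_string;
-- e.g. 1123143.8569999998 in the run recorded below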
+EXPLAIN SELECT + sum(ctimestamp1) +FROM alltypesorc_string +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc_string + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctimestamp1 (type: timestamp) + outputColumnNames: ctimestamp1 + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(ctimestamp1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: double) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + sum(ctimestamp1) +FROM alltypesorc_string +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +POSTHOOK: query: SELECT + sum(ctimestamp1) +FROM alltypesorc_string +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +1123143.8569999998 +PREHOOK: query: EXPLAIN SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: alltypesorc_string + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctimestamp1 (type: timestamp) + outputColumnNames: ctimestamp1 + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(ctimestamp1), variance(ctimestamp1), var_pop(ctimestamp1), var_samp(ctimestamp1), std(ctimestamp1), stddev(ctimestamp1), stddev_pop(ctimestamp1), stddev_samp(ctimestamp1) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num 
rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct), _col3 (type: struct), _col4 (type: struct), _col5 (type: struct), _col6 (type: struct), _col7 (type: struct) + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +POSTHOOK: query: SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +2.8798560435897438E13 8.970772952794212E19 8.970772952794212E19 9.206845925236166E19 9.471416447815084E9 9.471416447815084E9 9.471416447815084E9 9.595231068211002E9 diff --git a/ql/src/test/results/clientpositive/udf4.q.out b/ql/src/test/results/clientpositive/udf4.q.out index 9ea9293..1dfd7f8 100644 --- a/ql/src/test/results/clientpositive/udf4.q.out +++ b/ql/src/test/results/clientpositive/udf4.q.out @@ -79,10 +79,10 @@ STAGE PLANS: Select Operator expressions: 1.0 (type: double), 2.0 (type: double), -2.0 (type: double), 1 (type: bigint), 1 (type: bigint), -2 (type: bigint), 1.0 (type: double), null (type: void), 0.0 (type: double), 1 (type: bigint), 2 (type: bigint), -1 (type: bigint), 1 (type: bigint), rand(3) (type: double), 3 (type: int), -3 (type: int), 3 (type: int), -1 (type: int), -2 (type: int), -2 (type: tinyint), -2 (type: smallint), -2 (type: bigint), 0 (type: tinyint), 0 (type: smallint), 0 (type: int), 0 (type: bigint), 3 (type: tinyint), 3 (type: smallint), 3 (type: int), 3 (type: bigint), 2 (type: tinyint), 2 (type: smallint), 2 (type: int), 2 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, 
_col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33 - Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 208 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 200 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/udf5.q.out b/ql/src/test/results/clientpositive/udf5.q.out index 5e06b16..26cf3f1 100644 --- a/ql/src/test/results/clientpositive/udf5.q.out +++ b/ql/src/test/results/clientpositive/udf5.q.out @@ -22,33 +22,21 @@ POSTHOOK: query: EXPLAIN SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: dest1 - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: '2008-11-11 15:32:20' (type: string), '2008-11-11' (type: string), 1 (type: int), 11 (type: int), 2008 (type: int), 1 (type: int), 11 (type: int), 2008 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 - Statistics: Num rows: 1 Data size: 221 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 221 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: dest1 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: '2008-11-11 15:32:20' (type: string), '2008-11-11' (type: string), 1 (type: int), 11 (type: int), 2008 (type: int), 1 (type: int), 11 (type: int), 2008 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 221 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: SELECT from_unixtime(1226446340), to_date(from_unixtime(1226446340)), day('2008-11-01'), month('2008-11-01'), year('2008-11-01'), day('2008-11-01 15:32:20'), month('2008-11-01 15:32:20'), year('2008-11-01 15:32:20') FROM dest1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/udf6.q.out b/ql/src/test/results/clientpositive/udf6.q.out index 922d6ed..1de47ab 100644 --- a/ql/src/test/results/clientpositive/udf6.q.out +++ b/ql/src/test/results/clientpositive/udf6.q.out @@ -22,33 +22,21 @@ POSTHOOK: query: EXPLAIN SELECT IF(TRUE, 1, 2) FROM dest1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: dest1 - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE 
Column stats: COMPLETE - Select Operator - expressions: 1 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: dest1 + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 1 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + ListSink PREHOOK: query: SELECT IF(TRUE, 1, 2) FROM dest1 PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/udf7.q.out b/ql/src/test/results/clientpositive/udf7.q.out index 05f2037..e258d0b 100644 --- a/ql/src/test/results/clientpositive/udf7.q.out +++ b/ql/src/test/results/clientpositive/udf7.q.out @@ -49,10 +49,10 @@ STAGE PLANS: Select Operator expressions: 1.098612288668 (type: double), null (type: void), null (type: void), 1.098612288668 (type: double), null (type: void), null (type: void), 1.584962500721 (type: double), null (type: void), null (type: void), 0.47712125472 (type: double), null (type: void), null (type: void), 1.584962500721 (type: double), null (type: void), null (type: void), null (type: void), -1.0 (type: double), 7.389056098931 (type: double), 8.0 (type: double), 8.0 (type: double), 0.125 (type: double), 8.0 (type: double), 2.0 (type: double), NaN (type: double), 1.0 (type: double), power(CAST( 1 AS decimal(10,0)), 0) (type: double), power(CAST( 2 AS decimal(10,0)), 3) (type: double), power(CAST( 2 AS decimal(10,0)), 3) (type: double) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27 - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/udf_case.q.out b/ql/src/test/results/clientpositive/udf_case.q.out index 7eab4ab..29905ab 100644 --- a/ql/src/test/results/clientpositive/udf_case.q.out +++ b/ql/src/test/results/clientpositive/udf_case.q.out @@ -91,7 +91,7 @@ STAGE PLANS: Select Operator expressions: 2 (type: int), 5 (type: int), 15 (type: int), null (type: void), CASE (17) WHEN (18) THEN (null) WHEN (17) THEN (20) END (type: int), 24 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 500 Data size: 12000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 10000 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: SELECT CASE 1 diff --git a/ql/src/test/results/clientpositive/udf_current_database.q.out 
b/ql/src/test/results/clientpositive/udf_current_database.q.out index 00a27a7..e22165e 100644 --- a/ql/src/test/results/clientpositive/udf_current_database.q.out +++ b/ql/src/test/results/clientpositive/udf_current_database.q.out @@ -10,34 +10,22 @@ POSTHOOK: query: explain select current_database() POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: _dummy_table - Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE - Select Operator - expressions: 'default' (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Select Operator + expressions: 'default' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + ListSink PREHOOK: query: select current_database() PREHOOK: type: QUERY @@ -67,34 +55,22 @@ POSTHOOK: query: explain select current_database() POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: _dummy_table - Row Limit Per Split: 1 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE - Select Operator - expressions: 'xxx' (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Select Operator + expressions: 'xxx' (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + ListSink PREHOOK: query: select current_database() PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/udf_elt.q.out b/ql/src/test/results/clientpositive/udf_elt.q.out index fe6c856..f8acbf2 100644 --- a/ql/src/test/results/clientpositive/udf_elt.q.out +++ b/ql/src/test/results/clientpositive/udf_elt.q.out @@ -54,7 +54,7 @@ STAGE PLANS: Select Operator expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), elt(null, 'abc', 'defg') (type: string), null (type: void), null (type: void) outputColumnNames: _col0, _col1, _col2, 
_col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10 - Statistics: Num rows: 500 Data size: 479500 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 395500 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: SELECT elt(2, 'abc', 'defg'), diff --git a/ql/src/test/results/clientpositive/udf_explode.q.out b/ql/src/test/results/clientpositive/udf_explode.q.out index e07a987..301b1b7 100644 --- a/ql/src/test/results/clientpositive/udf_explode.q.out +++ b/ql/src/test/results/clientpositive/udf_explode.q.out @@ -217,12 +217,12 @@ STAGE PLANS: keys: col (type: int) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 value expressions: _col1 (type: bigint) auto parallelism: false @@ -282,17 +282,17 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -574,12 +574,12 @@ STAGE PLANS: keys: key (type: int), value (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 value expressions: _col2 (type: bigint) auto parallelism: false @@ -639,17 +639,17 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A 
masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git a/ql/src/test/results/clientpositive/udf_if.q.out b/ql/src/test/results/clientpositive/udf_if.q.out index d696ad7..a2d2c08 100644 --- a/ql/src/test/results/clientpositive/udf_if.q.out +++ b/ql/src/test/results/clientpositive/udf_if.q.out @@ -2,12 +2,12 @@ PREHOOK: query: DESCRIBE FUNCTION if PREHOOK: type: DESCFUNCTION POSTHOOK: query: DESCRIBE FUNCTION if POSTHOOK: type: DESCFUNCTION -There is no documentation for function 'if' +IF(expr1,expr2,expr3) - If expr1 is TRUE (expr1 <> 0 and expr1 <> NULL) then IF() returns expr2; otherwise it returns expr3. IF() returns a numeric or string value, depending on the context in which it is used. PREHOOK: query: DESCRIBE FUNCTION EXTENDED if PREHOOK: type: DESCFUNCTION POSTHOOK: query: DESCRIBE FUNCTION EXTENDED if POSTHOOK: type: DESCFUNCTION -There is no documentation for function 'if' +IF(expr1,expr2,expr3) - If expr1 is TRUE (expr1 <> 0 and expr1 <> NULL) then IF() returns expr2; otherwise it returns expr3. IF() returns a numeric or string value, depending on the context in which it is used. PREHOOK: query: EXPLAIN SELECT IF(TRUE, 1, 2) AS COL1, IF(FALSE, CAST(NULL AS STRING), CAST(1 AS STRING)) AS COL2, diff --git a/ql/src/test/results/clientpositive/udf_reflect2.q.out b/ql/src/test/results/clientpositive/udf_reflect2.q.out index aeb20ba..f445acb 100644 --- a/ql/src/test/results/clientpositive/udf_reflect2.q.out +++ b/ql/src/test/results/clientpositive/udf_reflect2.q.out @@ -308,104 +308,29 @@ TOK_QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - GatherStats: false - Select Operator - expressions: UDFToInteger(key) (type: int), value (type: string), 2013-02-15 19:41:20.0 (type: timestamp) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: int), reflect2(_col0,'byteValue') (type: tinyint), reflect2(_col0,'shortValue') (type: smallint), reflect2(_col0,'intValue') (type: int), reflect2(_col0,'longValue') (type: bigint), reflect2(_col0,'floatValue') (type: float), reflect2(_col0,'doubleValue') (type: double), reflect2(_col0,'toString') (type: string), _col1 (type: string), reflect2(_col1,'concat','_concat') (type: string), reflect2(_col1,'contains','86') (type: boolean), reflect2(_col1,'startsWith','v') (type: boolean), reflect2(_col1,'endsWith','6') (type: boolean), reflect2(_col1,'equals','val_86') (type: boolean), reflect2(_col1,'equalsIgnoreCase','VAL_86') (type: boolean), reflect2(_col1,'getBytes') (type: binary), reflect2(_col1,'indexOf','1') (type: int), reflect2(_col1,'lastIndexOf','1') (type: int), reflect2(_col1,'replace','val','VALUE') (type: string), reflect2(_col1,'substring',1) (type: string), reflect2(_col1,'substring',1,5) (type: string), reflect2(_col1,'toUpperCase') (type: string), reflect2(_col1,'trim') (type: string), _col2 (type: timestamp), reflect2(_col2,'getYear') (type: int), reflect2(_col2,'getMonth') (type: int), reflect2(_col2,'getDay') 
(type: int), reflect2(_col2,'getHours') (type: int), reflect2(_col2,'getMinutes') (type: int), reflect2(_col2,'getSeconds') (type: int), reflect2(_col2,'getTime') (type: bigint) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 5 - Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11,_col12,_col13,_col14,_col15,_col16,_col17,_col18,_col19,_col20,_col21,_col22,_col23,_col24,_col25,_col26,_col27,_col28,_col29,_col30 - columns.types int:tinyint:smallint:int:bigint:float:double:string:string:string:boolean:boolean:boolean:boolean:boolean:binary:int:int:string:string:string:string:string:timestamp:int:int:int:int:int:int:bigint - escape.delim \ - hive.serialization.extend.nesting.levels true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: src - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,value - columns.comments defaultdefault - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE true - bucket_count -1 - columns key,value - columns.comments defaultdefault - columns.types string:string -#### A masked pattern was here #### - name default.src - numFiles 1 - numRows 500 - rawDataSize 5312 - serialization.ddl struct src { string key, string value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 5812 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.src - name: default.src - Truncated Path -> Alias: - /src [a:src] - Stage: Stage-0 Fetch Operator limit: 5 Processor Tree: - ListSink + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + GatherStats: false + Select Operator + expressions: UDFToInteger(key) (type: int), value (type: string), 
2013-02-15 19:41:20.0 (type: timestamp) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), reflect2(_col0,'byteValue') (type: tinyint), reflect2(_col0,'shortValue') (type: smallint), reflect2(_col0,'intValue') (type: int), reflect2(_col0,'longValue') (type: bigint), reflect2(_col0,'floatValue') (type: float), reflect2(_col0,'doubleValue') (type: double), reflect2(_col0,'toString') (type: string), _col1 (type: string), reflect2(_col1,'concat','_concat') (type: string), reflect2(_col1,'contains','86') (type: boolean), reflect2(_col1,'startsWith','v') (type: boolean), reflect2(_col1,'endsWith','6') (type: boolean), reflect2(_col1,'equals','val_86') (type: boolean), reflect2(_col1,'equalsIgnoreCase','VAL_86') (type: boolean), reflect2(_col1,'getBytes') (type: binary), reflect2(_col1,'indexOf','1') (type: int), reflect2(_col1,'lastIndexOf','1') (type: int), reflect2(_col1,'replace','val','VALUE') (type: string), reflect2(_col1,'substring',1) (type: string), reflect2(_col1,'substring',1,5) (type: string), reflect2(_col1,'toUpperCase') (type: string), reflect2(_col1,'trim') (type: string), _col2 (type: timestamp), reflect2(_col2,'getYear') (type: int), reflect2(_col2,'getMonth') (type: int), reflect2(_col2,'getDay') (type: int), reflect2(_col2,'getHours') (type: int), reflect2(_col2,'getMinutes') (type: int), reflect2(_col2,'getSeconds') (type: int), reflect2(_col2,'getTime') (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 5 + Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: SELECT key, reflect2(key, "byteValue"), diff --git a/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out b/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out index b63bfcd..8292ca9 100644 --- a/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out +++ b/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out @@ -92,74 +92,50 @@ POSTHOOK: query: -- PPD explain select * from (select * from src) a where unix_timestamp(a.key) > 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (unix_timestamp(_col0) > 10) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (unix_timestamp(_col0) > 10) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + ListSink PREHOOK: query: explain select * from (select * from src) a where to_unix_timestamp(a.key) > 10 PREHOOK: type: QUERY POSTHOOK: query: explain select * from (select * from src) a where to_unix_timestamp(a.key) > 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-0 is a root stage STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Filter Operator - predicate: (to_unix_timestamp(key) > 10) (type: boolean) - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Stage: Stage-0 Fetch Operator limit: -1 Processor Tree: - ListSink + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (to_unix_timestamp(key) > 10) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + ListSink diff --git a/ql/src/test/results/clientpositive/udf_using.q.out b/ql/src/test/results/clientpositive/udf_using.q.out index 3e33cc3..a226b79 100644 --- a/ql/src/test/results/clientpositive/udf_using.q.out +++ b/ql/src/test/results/clientpositive/udf_using.q.out @@ -6,6 +6,7 @@ PREHOOK: Output: default.lookup POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: database:default POSTHOOK: Output: default.lookup +#### A masked pattern was here #### PREHOOK: query: create table udf_using (c1 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/udf_when.q.out b/ql/src/test/results/clientpositive/udf_when.q.out index fb262a7..696d7b0 100644 --- a/ql/src/test/results/clientpositive/udf_when.q.out +++ b/ql/src/test/results/clientpositive/udf_when.q.out @@ -91,7 +91,7 @@ STAGE PLANS: Select Operator expressions: 2 (type: int), 9 (type: int), 14 (type: int), null (type: void), CASE 
WHEN (false) THEN (null) WHEN (true) THEN (24) END (type: int), CASE WHEN (false) THEN (27) WHEN (true) THEN (null) END (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 500 Data size: 12000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 10000 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: SELECT CASE diff --git a/ql/src/test/results/clientpositive/udtf_explode.q.out b/ql/src/test/results/clientpositive/udtf_explode.q.out index 14640ac..6213746 100644 --- a/ql/src/test/results/clientpositive/udtf_explode.q.out +++ b/ql/src/test/results/clientpositive/udtf_explode.q.out @@ -348,17 +348,17 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -678,17 +678,17 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git a/ql/src/test/results/clientpositive/union10.q.out b/ql/src/test/results/clientpositive/union10.q.out index 961be51..d261a75 100644 --- a/ql/src/test/results/clientpositive/union10.q.out +++ b/ql/src/test/results/clientpositive/union10.q.out @@ -83,10 +83,10 @@ STAGE PLANS: Select Operator expressions: _col0 (type: string), UDFToInteger(_col1) (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -98,10 
             Select Operator
               expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -113,10 +113,10 @@ STAGE PLANS:
             Select Operator
               expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/union11.q.out b/ql/src/test/results/clientpositive/union11.q.out
index fc28d05..b624c7d 100644
--- a/ql/src/test/results/clientpositive/union11.q.out
+++ b/ql/src/test/results/clientpositive/union11.q.out
@@ -75,12 +75,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint)
           TableScan
             Union
@@ -94,12 +94,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint)
           TableScan
             Union
@@ -113,12 +113,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union12.q.out b/ql/src/test/results/clientpositive/union12.q.out
index e634539..4df390e 100644
--- a/ql/src/test/results/clientpositive/union12.q.out
+++ b/ql/src/test/results/clientpositive/union12.q.out
@@ -83,10 +83,10 @@ STAGE PLANS:
             Select Operator
               expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -98,10 +98,10 @@ STAGE PLANS:
             Select Operator
               expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -113,10 +113,10 @@ STAGE PLANS:
             Select Operator
               expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 3 Data size: 276 Basic stats: COMPLETE Column stats: COMPLETE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/union14.q.out b/ql/src/test/results/clientpositive/union14.q.out
index 84464a5..02821d7 100644
--- a/ql/src/test/results/clientpositive/union14.q.out
+++ b/ql/src/test/results/clientpositive/union14.q.out
@@ -77,12 +77,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint)
           TableScan
             Union
@@ -96,12 +96,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -109,14 +109,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/union15.q.out b/ql/src/test/results/clientpositive/union15.q.out
index 6337153..9cac42b 100644
--- a/ql/src/test/results/clientpositive/union15.q.out
+++ b/ql/src/test/results/clientpositive/union15.q.out
@@ -73,12 +73,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
           TableScan
             alias: s2
@@ -98,12 +98,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
           TableScan
             alias: s3
@@ -123,12 +123,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -136,14 +136,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 25 Data size: 2500 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 12 Data size: 1200 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 25 Data size: 2500 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 12 Data size: 1200 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 25 Data size: 2500 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 12 Data size: 1200 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/union17.q.out b/ql/src/test/results/clientpositive/union17.q.out
index fc647ff..699fc7c 100644
--- a/ql/src/test/results/clientpositive/union17.q.out
+++ b/ql/src/test/results/clientpositive/union17.q.out
@@ -114,7 +114,7 @@ STAGE PLANS:
           keys: VALUE._col0 (type: string)
           mode: hash
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 501 Data size: 96192 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL
           File Output Operator
             compressed: false
             table:
@@ -126,7 +126,7 @@ STAGE PLANS:
           keys: VALUE._col0 (type: string), VALUE._col1 (type: string)
           mode: hash
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 501 Data size: 188376 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL
           File Output Operator
             compressed: false
             table:
@@ -142,7 +142,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string)
             sort order: +
             Map-reduce partition columns: _col0 (type: string)
-            Statistics: Num rows: 501 Data size: 96192 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL
             value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -150,14 +150,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: final
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 125 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 125 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 125 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL
              table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -185,7 +185,7 @@ STAGE PLANS:
             key expressions: _col0 (type: string), _col1 (type: string)
             sort order: ++
             Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-            Statistics: Num rows: 501 Data size: 188376 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL
             value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -193,14 +193,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string), KEY._col1 (type: string)
           mode: final
           outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 125 Data size: 47000 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint)
             outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 125 Data size: 47000 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 125 Data size: 47000 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/union19.q.out b/ql/src/test/results/clientpositive/union19.q.out
index 341c4cd..3e8fea3 100644
--- a/ql/src/test/results/clientpositive/union19.q.out
+++ b/ql/src/test/results/clientpositive/union19.q.out
@@ -90,12 +90,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 501 Data size: 48096 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 250 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 501 Data size: 48096 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 250 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
             Select Operator
               expressions: _col0 (type: string), _col1 (type: string), _col1 (type: string)
@@ -127,12 +127,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 501 Data size: 48096 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 250 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 501 Data size: 48096 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 250 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
             Select Operator
              expressions: _col0 (type: string), _col1 (type: string), _col1 (type: string)
@@ -152,14 +152,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 250 Data size: 25000 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 125 Data size: 12500 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 250 Data size: 25000 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 125 Data size: 12500 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 250 Data size: 25000 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 125 Data size: 12500 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/union21.q.out b/ql/src/test/results/clientpositive/union21.q.out
index eaaffb0..fb2328d 100644
--- a/ql/src/test/results/clientpositive/union21.q.out
+++ b/ql/src/test/results/clientpositive/union21.q.out
@@ -60,12 +60,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
           TableScan
             alias: src_thrift
@@ -85,12 +85,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
           TableScan
             alias: src
@@ -110,12 +110,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
           TableScan
             alias: src
@@ -135,12 +135,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
           TableScan
             alias: src
@@ -160,12 +160,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -173,14 +173,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 761 Data size: 76100 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 380 Data size: 38000 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 761 Data size: 76100 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 380 Data size: 38000 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 761 Data size: 76100 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 380 Data size: 38000 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/union4.q.out b/ql/src/test/results/clientpositive/union4.q.out
index bf9bcdc..90daaad 100644
--- a/ql/src/test/results/clientpositive/union4.q.out
+++ b/ql/src/test/results/clientpositive/union4.q.out
@@ -80,10 +80,10 @@ STAGE PLANS:
             Select Operator
               expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -95,10 +95,10 @@ STAGE PLANS:
             Select Operator
               expressions: _col0 (type: string), UDFToInteger(_col1) (type: int)
               outputColumnNames: _col0, _col1
-              Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
               File Output Operator
                 compressed: false
-                Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE
                 table:
                     input format: org.apache.hadoop.mapred.TextInputFormat
                     output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/union5.q.out b/ql/src/test/results/clientpositive/union5.q.out
index 70c18b9..5d2a447 100644
--- a/ql/src/test/results/clientpositive/union5.q.out
+++ b/ql/src/test/results/clientpositive/union5.q.out
@@ -70,12 +70,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint)
           TableScan
             Union
@@ -89,12 +89,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union7.q.out b/ql/src/test/results/clientpositive/union7.q.out
index 26561ae..46ed3db 100644
--- a/ql/src/test/results/clientpositive/union7.q.out
+++ b/ql/src/test/results/clientpositive/union7.q.out
@@ -69,12 +69,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 13 Data size: 1248 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 13 Data size: 1248 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
           TableScan
             alias: s2
@@ -94,12 +94,12 @@ STAGE PLANS:
                 keys: _col0 (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 13 Data size: 1248 Basic stats: COMPLETE Column stats: PARTIAL
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 13 Data size: 1248 Basic stats: COMPLETE Column stats: PARTIAL
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -107,14 +107,14 @@ STAGE PLANS:
           keys: KEY._col0 (type: string)
           mode: mergepartial
           outputColumnNames: _col0, _col1
-          Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
           Select Operator
             expressions: _col0 (type: string), _col1 (type: bigint)
             outputColumnNames: _col0, _col1
-            Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
             File Output Operator
               compressed: false
-              Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/union_remove_1.q.out b/ql/src/test/results/clientpositive/union_remove_1.q.out
index 6e03d4e..62bc729 100644
--- a/ql/src/test/results/clientpositive/union_remove_1.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_1.q.out
@@ -81,12 +81,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -133,12 +133,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_10.q.out b/ql/src/test/results/clientpositive/union_remove_10.q.out
index 017944f..b078793 100644
--- a/ql/src/test/results/clientpositive/union_remove_10.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_10.q.out
@@ -103,12 +103,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_13.q.out b/ql/src/test/results/clientpositive/union_remove_13.q.out
index f2a7324..3e129e7 100644
--- a/ql/src/test/results/clientpositive/union_remove_13.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_13.q.out
@@ -95,12 +95,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_15.q.out b/ql/src/test/results/clientpositive/union_remove_15.q.out
index 902400d..f37b098 100644
--- a/ql/src/test/results/clientpositive/union_remove_15.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_15.q.out
@@ -87,12 +87,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -141,12 +141,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_16.q.out b/ql/src/test/results/clientpositive/union_remove_16.q.out
index e92931c..3e84c7e 100644
--- a/ql/src/test/results/clientpositive/union_remove_16.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_16.q.out
@@ -90,12 +90,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -173,12 +173,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_17.q.out b/ql/src/test/results/clientpositive/union_remove_17.q.out
index 593ed06..5b466c6 100644
--- a/ql/src/test/results/clientpositive/union_remove_17.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_17.q.out
@@ -81,12 +81,14 @@ STAGE PLANS:
               expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), _col2 (type: string)
               outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col2 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.outputtbl1
           TableScan
             alias: inputtbl1
             Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
@@ -100,23 +102,14 @@ STAGE PLANS:
               expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint), _col2 (type: string)
               outputColumnNames: _col0, _col1, _col2
               Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col2 (type: string)
-                sort order: +
-                Map-reduce partition columns: _col2 (type: string)
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
-                value expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: string)
-      Reduce Operator Tree:
-        Extract
-          Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE
-            table:
-                input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
-                name: default.outputtbl1
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe
+                    name: default.outputtbl1
 
   Stage: Stage-0
     Move Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_18.q.out b/ql/src/test/results/clientpositive/union_remove_18.q.out
index eed4394..d1cff4a 100644
--- a/ql/src/test/results/clientpositive/union_remove_18.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_18.q.out
@@ -85,12 +85,12 @@ STAGE PLANS:
                 keys: key (type: string), ds (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -139,12 +139,12 @@ STAGE PLANS:
                 keys: key (type: string), ds (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string), _col1 (type: string)
                   sort order: ++
                   Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col2 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_19.q.out b/ql/src/test/results/clientpositive/union_remove_19.q.out
index 51c154f..fb4c192 100644
--- a/ql/src/test/results/clientpositive/union_remove_19.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_19.q.out
@@ -85,12 +85,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -137,12 +137,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -445,12 +445,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -504,12 +504,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_2.q.out b/ql/src/test/results/clientpositive/union_remove_2.q.out
index 1efdce6..e407139 100644
--- a/ql/src/test/results/clientpositive/union_remove_2.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_2.q.out
@@ -117,12 +117,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_20.q.out b/ql/src/test/results/clientpositive/union_remove_20.q.out
index bc90c08..b4ba781 100644
--- a/ql/src/test/results/clientpositive/union_remove_20.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_20.q.out
@@ -83,12 +83,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -135,12 +135,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_21.q.out b/ql/src/test/results/clientpositive/union_remove_21.q.out
index 5734786..3cb3312 100644
--- a/ql/src/test/results/clientpositive/union_remove_21.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_21.q.out
@@ -83,12 +83,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -135,12 +135,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_22.q.out b/ql/src/test/results/clientpositive/union_remove_22.q.out
index b835523..439e5a2 100644
--- a/ql/src/test/results/clientpositive/union_remove_22.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_22.q.out
@@ -81,12 +81,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -133,12 +133,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -277,12 +277,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -329,12 +329,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_23.q.out b/ql/src/test/results/clientpositive/union_remove_23.q.out
index dd82b4f..d4f1468 100644
--- a/ql/src/test/results/clientpositive/union_remove_23.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_23.q.out
@@ -86,12 +86,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_24.q.out b/ql/src/test/results/clientpositive/union_remove_24.q.out
index 38a68cf..5d1f4fa 100644
--- a/ql/src/test/results/clientpositive/union_remove_24.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_24.q.out
@@ -79,12 +79,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -131,12 +131,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_25.q.out b/ql/src/test/results/clientpositive/union_remove_25.q.out
index 661be8b..3b20e15 100644
--- a/ql/src/test/results/clientpositive/union_remove_25.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_25.q.out
@@ -97,12 +97,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -151,12 +151,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_4.q.out b/ql/src/test/results/clientpositive/union_remove_4.q.out
index f27f6b7..f139595 100644
--- a/ql/src/test/results/clientpositive/union_remove_4.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_4.q.out
@@ -86,12 +86,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -177,12 +177,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_5.q.out b/ql/src/test/results/clientpositive/union_remove_5.q.out
index a6d7b3d..6841f43 100644
--- a/ql/src/test/results/clientpositive/union_remove_5.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_5.q.out
@@ -163,12 +163,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_6.q.out b/ql/src/test/results/clientpositive/union_remove_6.q.out
index a9d2e8b..1b501af 100644
--- a/ql/src/test/results/clientpositive/union_remove_6.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_6.q.out
@@ -85,12 +85,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -203,12 +203,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_7.q.out b/ql/src/test/results/clientpositive/union_remove_7.q.out
index 6a28b95..679b4d7 100644
--- a/ql/src/test/results/clientpositive/union_remove_7.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_7.q.out
@@ -85,12 +85,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
@@ -137,12 +137,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_8.q.out b/ql/src/test/results/clientpositive/union_remove_8.q.out
index a688cff..529105b 100644
--- a/ql/src/test/results/clientpositive/union_remove_8.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_8.q.out
@@ -121,12 +121,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_remove_9.q.out b/ql/src/test/results/clientpositive/union_remove_9.q.out
index 3ab6ed5..9b4168e 100644
--- a/ql/src/test/results/clientpositive/union_remove_9.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_9.q.out
@@ -183,12 +183,12 @@ STAGE PLANS:
                 keys: key (type: string)
                 mode: hash
                 outputColumnNames: _col0, _col1
-                Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                 Reduce Output Operator
                   key expressions: _col0 (type: string)
                   sort order: +
                   Map-reduce partition columns: _col0 (type: string)
-                  Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                   value expressions: _col1 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
diff --git a/ql/src/test/results/clientpositive/union_view.q.out b/ql/src/test/results/clientpositive/union_view.q.out
index 5c32ad6..bbbf5e7 100644
--- a/ql/src/test/results/clientpositive/union_view.q.out
+++ b/ql/src/test/results/clientpositive/union_view.q.out
@@ -260,31 +260,31 @@ STAGE PLANS:
           TableScan
             alias: src_union_1
            filterExpr: (ds = '1') (type: boolean)
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order:
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
          aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -331,31 +331,31 @@ STAGE PLANS:
           TableScan
             alias: src_union_2
             filterExpr: (ds = '2') (type: boolean)
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order:
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
           Select Operator
             expressions: _col0 (type: bigint)
             outputColumnNames: _col0
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -402,31 +402,31 @@ STAGE PLANS:
           TableScan
             alias: src_union_3
            filterExpr: (ds = '3') (type: boolean)
-            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
                 aggregations: count(1)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                 Reduce Output Operator
                   sort order:
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   value expressions: _col0 (type: bigint)
       Reduce Operator Tree:
         Group By Operator
           aggregations: count(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows:
1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -852,35 +852,35 @@ STAGE PLANS: TableScan alias: src_union_1 filterExpr: (ds = '1') (type: boolean) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Union - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -927,35 +927,35 @@ STAGE PLANS: TableScan alias: src_union_2 filterExpr: (ds = '2') (type: boolean) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Union - Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator - 
Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1002,35 +1002,35 @@ STAGE PLANS: TableScan alias: src_union_3 filterExpr: (ds = '3') (type: boolean) - Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Union - Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Select Operator - Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: bigint) Reduce Operator Tree: Group By Operator aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE 
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1146,35 +1146,35 @@ STAGE PLANS:
          TableScan
            alias: src_union_3
            filterExpr: (ds = '4') (type: boolean)
-           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
+           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Select Operator
-             Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Union
-               Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
               Select Operator
-                 Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE
+                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                 Group By Operator
                   aggregations: count(1)
                   mode: hash
                   outputColumnNames: _col0
-                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                   Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                   Reduce Output Operator
                     sort order:
-                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                     Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
                     value expressions: _col0 (type: bigint)
      Reduce Operator Tree:
        Group By Operator
          aggregations: count(VALUE._col0)
          mode: mergepartial
          outputColumnNames: _col0
-         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+         Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
          Select Operator
            expressions: _col0 (type: bigint)
            outputColumnNames: _col0
-           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+           Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
            File Output Operator
              compressed: false
-             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+             Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE
              table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/update_after_multiple_inserts.q.out b/ql/src/test/results/clientpositive/update_after_multiple_inserts.q.out
index cb8e319..a2ad3af 100644
--- a/ql/src/test/results/clientpositive/update_after_multiple_inserts.q.out
+++ b/ql/src/test/results/clientpositive/update_after_multiple_inserts.q.out
@@ -1,12 +1,12 @@
 PREHOOK: query: create table acid_uami(i int, de decimal(5,2),
-    vc varchar(128)) clustered by (i) into 2 buckets stored as orc
+    vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_uami
 POSTHOOK: query: create table acid_uami(i int, de decimal(5,2),
-    vc varchar(128)) clustered by (i) into 2 buckets stored as orc
+    vc varchar(128)) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_uami
diff --git a/ql/src/test/results/clientpositive/update_all_non_partitioned.q.out b/ql/src/test/results/clientpositive/update_all_non_partitioned.q.out
index fde6d8d..39dd71b 100644
--- a/ql/src/test/results/clientpositive/update_all_non_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/update_all_non_partitioned.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_uanp
-POSTHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_uanp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_uanp
diff --git a/ql/src/test/results/clientpositive/update_all_partitioned.q.out b/ql/src/test/results/clientpositive/update_all_partitioned.q.out
index 3fae6a9..533dd88 100644
--- a/ql/src/test/results/clientpositive/update_all_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/update_all_partitioned.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_uap
-POSTHOOK: query: create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_uap(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_uap
@@ -85,15 +85,21 @@ POSTHOOK: Input: default@acid_uap@ds=today
 POSTHOOK: Input: default@acid_uap@ds=tomorrow
 #### A masked pattern was here ####
 -1073279343 fred today
+-1073279343 oj1YrV5Wa today
 -1073051226 fred today
 -1072910839 fred today
+-1072081801 dPkN74F7 today
 -1072081801 fred today
 -1072076362 fred today
 -1071480828 fred today
+-1071363017 Anj0oF today
 -1071363017 fred today
+-1070883071 0ruyd6Y50JpdGRf6HqD today
 -1070883071 fred today
 -1070551679 fred today
+-1070551679 iUR3Q today
 -1069736047 fred today
+-1069736047 k17Am8uPHWk02cEf1jet today
 762 fred tomorrow
 762 fred tomorrow
 762 fred tomorrow
diff --git a/ql/src/test/results/clientpositive/update_all_types.q.out b/ql/src/test/results/clientpositive/update_all_types.q.out
index 36b4684..f1353d0 100644
--- a/ql/src/test/results/clientpositive/update_all_types.q.out
+++ b/ql/src/test/results/clientpositive/update_all_types.q.out
@@ -10,7 +10,7 @@ PREHOOK: query: create table acid_uat(ti tinyint,
     s string,
     vc varchar(128),
     ch char(36),
-    b boolean) clustered by (i) into 2 buckets stored as orc
+    b boolean) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_uat
@@ -26,7 +26,7 @@ POSTHOOK: query: create table acid_uat(ti tinyint,
     s string,
     vc varchar(128),
     ch char(36),
-    b boolean) clustered by (i) into 2 buckets stored as orc
+    b boolean) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_uat
@@ -151,3 +151,37 @@ NULL -5470 -1072076362 1864027286 NULL -5470.0 NULL NULL 1970-01-01 2uLyD28144vk
 NULL -947 -1070551679 1864027286 NULL -947.0 NULL NULL 1970-01-01 iUR3Q iUR3Q 4KWs6gw7lv2WYd66P false
 11 NULL -1069736047 -453772520 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true
 1 2 3 4 3.14 6.28 5.99 NULL 2014-09-01 its a beautiful day in the neighbhorhood a beautiful day for a neighbor wont you be mine true
+PREHOOK: query: update acid_uat set
+    ti = ti * 2,
+    si = cast(f as int),
+    d = floor(de)
+    where s = 'aw724t8c5558x2xneC624'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_uat
+PREHOOK: Output: default@acid_uat
+POSTHOOK: query: update acid_uat set
+    ti = ti * 2,
+    si = cast(f as int),
+    d = floor(de)
+    where s = 'aw724t8c5558x2xneC624'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_uat
+POSTHOOK: Output: default@acid_uat
+PREHOOK: query: select * from acid_uat order by i
+PREHOOK: type: QUERY
+PREHOOK: Input: default@acid_uat
+#### A masked pattern was here ####
+POSTHOOK: query: select * from acid_uat order by i
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@acid_uat
+#### A masked pattern was here ####
+11 NULL -1073279343 -1595604468 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true
+NULL -7382 -1073051226 -1887561756 NULL -7382.0 NULL NULL 1970-01-01 A34p7oRr2WvUJNf A34p7oRr2WvUJNf 4hA4KQj2vD3fI6gX82220d false
+11 NULL -1072910839 2048385991 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false
+NULL 8373 -1072081801 1864027286 NULL 8373.0 NULL NULL 1970-01-01 dPkN74F7 dPkN74F7 4KWs6gw7lv2WYd66P true
+NULL -5470 -1072076362 1864027286 NULL -5470.0 NULL NULL 1970-01-01 2uLyD28144vklju213J1mr 2uLyD28144vklju213J1mr 4KWs6gw7lv2WYd66P true
+-102 -51 -1071480828 -1401575336 -51.0 -51.0 -51.0 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true
+8 NULL -1071363017 1349676361 8.0 NULL 8.0 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true
+NULL -947 -1070551679 1864027286 NULL -947.0 NULL NULL 1970-01-01 iUR3Q iUR3Q 4KWs6gw7lv2WYd66P false
+11 NULL -1069736047 -453772520 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true
+1 2 3 4 3.14 6.28 5.99 NULL 2014-09-01 its a beautiful day in the neighbhorhood a beautiful day for a neighbor wont you be mine true
diff --git a/ql/src/test/results/clientpositive/update_tmp_table.q.out b/ql/src/test/results/clientpositive/update_tmp_table.q.out
index 8180f06..3c86a0c 100644
--- a/ql/src/test/results/clientpositive/update_tmp_table.q.out
+++ b/ql/src/test/results/clientpositive/update_tmp_table.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc
+PREHOOK: query: create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_utt
-POSTHOOK: query: create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_utt(a int, b varchar(128)) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_utt
diff --git a/ql/src/test/results/clientpositive/update_two_cols.q.out b/ql/src/test/results/clientpositive/update_two_cols.q.out
index 553608f..5132c0c 100644
--- a/ql/src/test/results/clientpositive/update_two_cols.q.out
+++ b/ql/src/test/results/clientpositive/update_two_cols.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_utc
-POSTHOOK: query: create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_utc(a int, b varchar(128), c float) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_utc
diff --git a/ql/src/test/results/clientpositive/update_where_no_match.q.out b/ql/src/test/results/clientpositive/update_where_no_match.q.out
index afef267..c88899e 100644
--- a/ql/src/test/results/clientpositive/update_where_no_match.q.out
+++ b/ql/src/test/results/clientpositive/update_where_no_match.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_wnm
-POSTHOOK: query: create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_wnm(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_wnm
diff --git a/ql/src/test/results/clientpositive/update_where_non_partitioned.q.out b/ql/src/test/results/clientpositive/update_where_non_partitioned.q.out
index 5c79379..9c79235 100644
--- a/ql/src/test/results/clientpositive/update_where_non_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/update_where_non_partitioned.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_uwnp
-POSTHOOK: query: create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_uwnp(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_uwnp
diff --git a/ql/src/test/results/clientpositive/update_where_partitioned.q.out b/ql/src/test/results/clientpositive/update_where_partitioned.q.out
index b83c52a..fef0dc0 100644
--- a/ql/src/test/results/clientpositive/update_where_partitioned.q.out
+++ b/ql/src/test/results/clientpositive/update_where_partitioned.q.out
@@ -1,8 +1,8 @@
-PREHOOK: query: create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc
+PREHOOK: query: create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@acid_uwp
-POSTHOOK: query: create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc
+POSTHOOK: query: create table acid_uwp(a int, b varchar(128)) partitioned by (ds string) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@acid_uwp
diff --git a/ql/src/test/results/clientpositive/vector_char_4.q.out b/ql/src/test/results/clientpositive/vector_char_4.q.out
new file mode 100644
index 0000000..58988bf
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_char_4.q.out
@@ -0,0 +1,202 @@
+PREHOOK: query: drop table if exists vectortab2k
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists vectortab2k
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: drop table if exists vectortab2korc
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists vectortab2korc
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table vectortab2k(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: create table vectortab2k(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: create table vectortab2korc(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2korc
+POSTHOOK: query: create table vectortab2korc(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2korc
+PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vectortab2k
+PREHOOK: Output: default@vectortab2korc
+POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vectortab2k
+POSTHOOK: Output: default@vectortab2korc
+POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
+PREHOOK: query: drop table if exists char_lazy_binary_columnar
+PREHOOK: type: DROPTABLE
+POSTHOOK: query: drop table if exists char_lazy_binary_columnar
+POSTHOOK: type: DROPTABLE
+PREHOOK: query: create table char_lazy_binary_columnar(ct char(10), csi char(10), ci char(20), cb char(30), cf char(20), cd char(20), cs char(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@char_lazy_binary_columnar
+POSTHOOK: query: create table char_lazy_binary_columnar(ct char(10), csi char(10), ci char(20), cb char(30), cf char(20), cd char(20), cs char(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@char_lazy_binary_columnar
+PREHOOK: query: explain
+insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert overwrite table char_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: vectortab2korc
+            Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: CAST( t AS CHAR(10) (type: char(10)), CAST( si AS CHAR(10) (type: char(10)), CAST( i AS CHAR(20) (type: char(20)), CAST( b AS CHAR(30) (type: char(30)), CAST( f AS CHAR(20) (type: char(20)), CAST( d AS CHAR(20) (type: char(20)), CAST( s AS CHAR(50) (type: char(50))
+              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
+              Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe
+                    name: default.char_lazy_binary_columnar
+      Execution mode: vectorized
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+          table:
+              input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+              output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat
+              serde: org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe
+              name: default.char_lazy_binary_columnar
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+  Stage: Stage-3
+    Merge File Operator
+      Map Operator Tree:
+          RCFile Merge Operator
+      merge level: block
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+
+  Stage: Stage-5
+    Merge File Operator
+      Map Operator Tree:
+          RCFile Merge Operator
+      merge level: block
+      input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat
+
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
diff --git a/ql/src/test/results/clientpositive/vector_char_simple.q.out b/ql/src/test/results/clientpositive/vector_char_simple.q.out
index 72dc8aa..fbe1b40 100644
--- a/ql/src/test/results/clientpositive/vector_char_simple.q.out
+++ b/ql/src/test/results/clientpositive/vector_char_simple.q.out
@@ -220,3 +220,98 @@ POSTHOOK: query: drop table char_2
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@char_2
 POSTHOOK: Output: default@char_2
+PREHOOK: query: -- Implicit conversion. Occurs in reduce-side under Tez.
+create table char_3 (
+  field char(12)
+) stored as orc
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@char_3
+POSTHOOK: query: -- Implicit conversion. Occurs in reduce-side under Tez.
+create table char_3 (
+  field char(12)
+) stored as orc
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@char_3
+PREHOOK: query: explain
+insert into table char_3 select cint from alltypesorc limit 10
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+insert into table char_3 select cint from alltypesorc limit 10
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+  Stage-2 depends on stages: Stage-0
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: alltypesorc
+            Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: cint (type: int)
+              outputColumnNames: _col0
+              Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
+              Limit
+                Number of rows: 10
+                Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  sort order:
+                  Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col0 (type: int)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Select Operator
+          expressions: VALUE._col0 (type: int)
+          outputColumnNames: _col0
+          Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+          Limit
+            Number of rows: 10
+            Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: CAST( _col0 AS CHAR(12) (type: char(12))
+              outputColumnNames: _col0
+              Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+                    serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+                    name: default.char_3
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: false
+          table:
+              input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
+              output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
+              serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
+              name: default.char_3
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+
+PREHOOK: query: insert into table char_3 select cint from alltypesorc limit 10
+PREHOOK: type: QUERY
+PREHOOK: Input: default@alltypesorc
+PREHOOK: Output: default@char_3
+POSTHOOK: query: insert into table char_3 select cint from alltypesorc limit 10
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@alltypesorc
+POSTHOOK: Output: default@char_3
+POSTHOOK: Lineage: char_3.field EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ]
+PREHOOK: query: drop table char_3
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@char_3
+PREHOOK: Output: default@char_3
+POSTHOOK: query: drop table char_3
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@char_3
+POSTHOOK: Output: default@char_3
diff --git a/ql/src/test/results/clientpositive/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/vector_count_distinct.q.out
new file mode 100644
index 0000000..becb4e2
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_count_distinct.q.out
@@ -0,0 +1,1360 @@
+PREHOOK: query: create table web_sales_txt
+(
+    ws_sold_date_sk int,
+    ws_sold_time_sk int,
+    ws_ship_date_sk int,
+    ws_item_sk int,
+    ws_bill_customer_sk int,
+    ws_bill_cdemo_sk int,
+    ws_bill_hdemo_sk int,
+    ws_bill_addr_sk int,
+    ws_ship_customer_sk int,
+    ws_ship_cdemo_sk int,
+    ws_ship_hdemo_sk int,
+    ws_ship_addr_sk int,
+    ws_web_page_sk int,
+    ws_web_site_sk int,
+    ws_ship_mode_sk int,
+    ws_warehouse_sk int,
+    ws_promo_sk int,
+    ws_order_number int,
+    ws_quantity int,
+    ws_wholesale_cost decimal(7,2),
+    ws_list_price decimal(7,2),
+    ws_sales_price decimal(7,2),
+    ws_ext_discount_amt decimal(7,2),
+    ws_ext_sales_price decimal(7,2),
+    ws_ext_wholesale_cost decimal(7,2),
+    ws_ext_list_price decimal(7,2),
+    ws_ext_tax decimal(7,2),
+    ws_coupon_amt decimal(7,2),
+    ws_ext_ship_cost decimal(7,2),
+    ws_net_paid decimal(7,2),
+    ws_net_paid_inc_tax decimal(7,2),
+    ws_net_paid_inc_ship decimal(7,2),
+    ws_net_paid_inc_ship_tax decimal(7,2),
+    ws_net_profit decimal(7,2)
+)
+row format delimited fields terminated by '|'
+stored as textfile
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@web_sales_txt
+POSTHOOK: query: create table web_sales_txt
+(
+    ws_sold_date_sk int,
+    ws_sold_time_sk int,
+    ws_ship_date_sk int,
+    ws_item_sk int,
+    ws_bill_customer_sk int,
+    ws_bill_cdemo_sk int,
+    ws_bill_hdemo_sk int,
+    ws_bill_addr_sk int,
+    ws_ship_customer_sk int,
+    ws_ship_cdemo_sk int,
+    ws_ship_hdemo_sk int,
+    ws_ship_addr_sk int,
+    ws_web_page_sk int,
+    ws_web_site_sk int,
+    ws_ship_mode_sk int,
+    ws_warehouse_sk int,
+    ws_promo_sk int,
+    ws_order_number int,
+    ws_quantity int,
+    ws_wholesale_cost decimal(7,2),
+    ws_list_price decimal(7,2),
+    ws_sales_price decimal(7,2),
+    ws_ext_discount_amt decimal(7,2),
+    ws_ext_sales_price decimal(7,2),
+    ws_ext_wholesale_cost decimal(7,2),
+    ws_ext_list_price decimal(7,2),
+    ws_ext_tax decimal(7,2),
+    ws_coupon_amt decimal(7,2),
+    ws_ext_ship_cost decimal(7,2),
+    ws_net_paid decimal(7,2),
+    ws_net_paid_inc_tax decimal(7,2),
+    ws_net_paid_inc_ship decimal(7,2),
+    ws_net_paid_inc_ship_tax decimal(7,2),
+    ws_net_profit decimal(7,2)
+)
+row format delimited fields terminated by '|'
+stored as textfile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@web_sales_txt
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/web_sales_2k' OVERWRITE INTO TABLE web_sales_txt
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@web_sales_txt
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/web_sales_2k' OVERWRITE INTO TABLE web_sales_txt
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@web_sales_txt
+PREHOOK: query: ------------------------------------------------------------------------------------------
+
+create table web_sales
+(
+    ws_sold_date_sk int,
+    ws_sold_time_sk int,
+    ws_ship_date_sk int,
+    ws_item_sk int,
+    ws_bill_customer_sk int,
+    ws_bill_cdemo_sk int,
+    ws_bill_hdemo_sk int,
+    ws_bill_addr_sk int,
+    ws_ship_customer_sk int,
+    ws_ship_cdemo_sk int,
+    ws_ship_hdemo_sk int,
+    ws_ship_addr_sk int,
+    ws_web_page_sk int,
+    ws_ship_mode_sk int,
+    ws_warehouse_sk int,
+    ws_promo_sk int,
+    ws_order_number int,
+    ws_quantity int,
+    ws_wholesale_cost decimal(7,2),
+    ws_list_price decimal(7,2),
+    ws_sales_price decimal(7,2),
+    ws_ext_discount_amt decimal(7,2),
+    ws_ext_sales_price decimal(7,2),
+    ws_ext_wholesale_cost decimal(7,2),
+    ws_ext_list_price decimal(7,2),
+    ws_ext_tax decimal(7,2),
+    ws_coupon_amt decimal(7,2),
+    ws_ext_ship_cost decimal(7,2),
+    ws_net_paid decimal(7,2),
+    ws_net_paid_inc_tax decimal(7,2),
+    ws_net_paid_inc_ship decimal(7,2),
+    ws_net_paid_inc_ship_tax decimal(7,2),
+    ws_net_profit decimal(7,2)
+)
+partitioned by
+(
+    ws_web_site_sk int
+)
+stored as orc
+tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@web_sales
+POSTHOOK: query: ------------------------------------------------------------------------------------------
+
+create table web_sales
+(
+    ws_sold_date_sk int,
+    ws_sold_time_sk int,
+    ws_ship_date_sk int,
+    ws_item_sk int,
+    ws_bill_customer_sk int,
+    ws_bill_cdemo_sk int,
+    ws_bill_hdemo_sk int,
+    ws_bill_addr_sk int,
+    ws_ship_customer_sk int,
+    ws_ship_cdemo_sk int,
+    ws_ship_hdemo_sk int,
+    ws_ship_addr_sk int,
+    ws_web_page_sk int,
+    ws_ship_mode_sk int,
+    ws_warehouse_sk int,
+    ws_promo_sk int,
+    ws_order_number int,
+    ws_quantity int,
+    ws_wholesale_cost decimal(7,2),
+    ws_list_price decimal(7,2),
+    ws_sales_price decimal(7,2),
+    ws_ext_discount_amt decimal(7,2),
+    ws_ext_sales_price decimal(7,2),
+    ws_ext_wholesale_cost decimal(7,2),
+    ws_ext_list_price decimal(7,2),
+    ws_ext_tax decimal(7,2),
+    ws_coupon_amt decimal(7,2),
+    ws_ext_ship_cost decimal(7,2),
+    ws_net_paid decimal(7,2),
+    ws_net_paid_inc_tax decimal(7,2),
+    ws_net_paid_inc_ship decimal(7,2),
+    ws_net_paid_inc_ship_tax decimal(7,2),
+    ws_net_profit decimal(7,2)
+)
+partitioned by
+(
+    ws_web_site_sk int
+)
+stored as orc
+tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@web_sales
+PREHOOK: query: insert overwrite table web_sales
+partition (ws_web_site_sk)
+select ws_sold_date_sk, ws_sold_time_sk, ws_ship_date_sk, ws_item_sk,
+    ws_bill_customer_sk, ws_bill_cdemo_sk, ws_bill_hdemo_sk, ws_bill_addr_sk,
+    ws_ship_customer_sk, ws_ship_cdemo_sk, ws_ship_hdemo_sk, ws_ship_addr_sk,
+    ws_web_page_sk, ws_ship_mode_sk, ws_warehouse_sk, ws_promo_sk, ws_order_number,
+    ws_quantity, ws_wholesale_cost, ws_list_price, ws_sales_price, ws_ext_discount_amt,
+    ws_ext_sales_price, ws_ext_wholesale_cost, ws_ext_list_price, ws_ext_tax,
+    ws_coupon_amt, ws_ext_ship_cost, ws_net_paid, ws_net_paid_inc_tax, ws_net_paid_inc_ship,
+    ws_net_paid_inc_ship_tax, ws_net_profit, ws_web_site_sk from web_sales_txt
+PREHOOK: type: QUERY
+PREHOOK: Input: default@web_sales_txt
+PREHOOK: Output: default@web_sales
+POSTHOOK: query: insert overwrite table web_sales
+partition (ws_web_site_sk)
+select ws_sold_date_sk, ws_sold_time_sk, ws_ship_date_sk, ws_item_sk,
+    ws_bill_customer_sk, ws_bill_cdemo_sk, ws_bill_hdemo_sk, ws_bill_addr_sk,
+    ws_ship_customer_sk, ws_ship_cdemo_sk, ws_ship_hdemo_sk, ws_ship_addr_sk,
+    ws_web_page_sk, ws_ship_mode_sk, ws_warehouse_sk, ws_promo_sk, ws_order_number,
+    ws_quantity, ws_wholesale_cost, ws_list_price, ws_sales_price, ws_ext_discount_amt,
+    ws_ext_sales_price, ws_ext_wholesale_cost, ws_ext_list_price, ws_ext_tax,
+    ws_coupon_amt, ws_ext_ship_cost, ws_net_paid, ws_net_paid_inc_tax, ws_net_paid_inc_ship,
+    ws_net_paid_inc_ship_tax, ws_net_profit, ws_web_site_sk from web_sales_txt
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@web_sales_txt
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=1
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=10
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=11
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=12
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=13
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=14
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=15
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=16
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=17
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=18
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=19
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=2
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=20
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=21
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=22
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=23
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=24
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=25
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=26
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=27
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=28
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=29
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=3
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=30
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=4
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=5
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=6
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=7
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=8
+POSTHOOK: Output: default@web_sales@ws_web_site_sk=9
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=10).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=11).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=12).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid,
type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=13).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, 
type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_quantity SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=14).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_sales_price SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=15).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=15).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: 
web_sales PARTITION(ws_web_site_sk=16).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=16).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=17).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, 
comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=17).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), 
comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, 
type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=18).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_net_paid_inc_ship SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=19).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_bill_cdemo_sk SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=1).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=1).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=20).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: 
web_sales PARTITION(ws_web_site_sk=20).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=20).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), 
comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=21).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: 
Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_sales_price SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=22).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_ship_cost SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_sold_date_sk 
SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=23).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=24).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=24).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=25).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, 
type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=25).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_ship_cost SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_sold_date_sk 
SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=26).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=27).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=27).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=28).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, 
type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=28).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_ship_cost SIMPLE 
[(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_sold_date_sk 
SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=29).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=2).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=2).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=30).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=3).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=4).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=5).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=6).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=7).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=8).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_bill_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_bill_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_bill_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_bill_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_bill_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_coupon_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_coupon_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_discount_amt SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_discount_amt, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_ship_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_ship_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ext_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ext_wholesale_cost, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_item_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_item_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_list_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_list_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_net_paid SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_net_paid_inc_ship SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_net_paid_inc_ship_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_ship_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_net_paid_inc_tax SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_paid_inc_tax, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_net_profit SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_net_profit, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_order_number SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_order_number, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_promo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_promo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_quantity SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_quantity, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_sales_price SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sales_price, type:decimal(7,2), comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_addr_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_addr_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_cdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_cdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_customer_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_customer_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_hdemo_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_hdemo_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_ship_mode_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_ship_mode_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_sold_date_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_date_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_sold_time_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_sold_time_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ]
+POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ]
+PREHOOK: query: ------------------------------------------------------------------------------------------
+
+explain
+select count(distinct ws_order_number) from web_sales
+PREHOOK: type: QUERY
+POSTHOOK: query: ------------------------------------------------------------------------------------------
+
+explain
+select count(distinct ws_order_number) from web_sales
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: web_sales
+            Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: ws_order_number (type: int)
+              outputColumnNames: ws_order_number
+              Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count(DISTINCT ws_order_number)
+                keys: ws_order_number (type: int)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Statistics: Num rows: 2000 Data size: 3504000 Basic stats: COMPLETE Column stats: NONE
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(DISTINCT KEY._col0:0._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: bigint)
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select count(distinct ws_order_number) from web_sales
+PREHOOK: type: QUERY
+PREHOOK: Input: default@web_sales
+PREHOOK: Input: default@web_sales@ws_web_site_sk=1
+PREHOOK: Input: default@web_sales@ws_web_site_sk=10
+PREHOOK: Input: default@web_sales@ws_web_site_sk=11
+PREHOOK: Input: default@web_sales@ws_web_site_sk=12
+PREHOOK: Input: default@web_sales@ws_web_site_sk=13
+PREHOOK: Input: default@web_sales@ws_web_site_sk=14
+PREHOOK: Input: default@web_sales@ws_web_site_sk=15
+PREHOOK: Input: default@web_sales@ws_web_site_sk=16
+PREHOOK: Input: default@web_sales@ws_web_site_sk=17
+PREHOOK: Input: default@web_sales@ws_web_site_sk=18
+PREHOOK: Input: default@web_sales@ws_web_site_sk=19
+PREHOOK: Input: default@web_sales@ws_web_site_sk=2
+PREHOOK: Input: default@web_sales@ws_web_site_sk=20
+PREHOOK: Input: default@web_sales@ws_web_site_sk=21
+PREHOOK: Input: default@web_sales@ws_web_site_sk=22
+PREHOOK: Input: default@web_sales@ws_web_site_sk=23
+PREHOOK: Input: default@web_sales@ws_web_site_sk=24
+PREHOOK: Input: default@web_sales@ws_web_site_sk=25
+PREHOOK: Input: default@web_sales@ws_web_site_sk=26
+PREHOOK: Input: default@web_sales@ws_web_site_sk=27
+PREHOOK: Input: default@web_sales@ws_web_site_sk=28
+PREHOOK: Input: default@web_sales@ws_web_site_sk=29
+PREHOOK: Input: default@web_sales@ws_web_site_sk=3
+PREHOOK: Input: default@web_sales@ws_web_site_sk=30
+PREHOOK: Input: default@web_sales@ws_web_site_sk=4
+PREHOOK: Input: default@web_sales@ws_web_site_sk=5
+PREHOOK: Input: default@web_sales@ws_web_site_sk=6
+PREHOOK: Input: default@web_sales@ws_web_site_sk=7
+PREHOOK: Input: default@web_sales@ws_web_site_sk=8
+PREHOOK: Input: default@web_sales@ws_web_site_sk=9
+#### A masked pattern was here ####
+POSTHOOK: query: select count(distinct ws_order_number) from web_sales
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@web_sales
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=1
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=10
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=11
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=12
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=13
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=14
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=15
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=16
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=17
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=18
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=19
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=2
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=20
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=21
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=22
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=23
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=24
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=25
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=26
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=27
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=28
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=29
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=3
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=30
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=4
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=5
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=6
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=7
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=8
+POSTHOOK: Input: default@web_sales@ws_web_site_sk=9
+#### A masked pattern was here ####
+169
diff --git a/ql/src/test/results/clientpositive/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/vector_distinct_2.q.out
new file mode 100644
index 0000000..0019f95
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_distinct_2.q.out
@@ -0,0 +1,1859 @@
+PREHOOK: query: create table vectortab2k(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: create table vectortab2k(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: create table vectortab2korc(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2korc
+POSTHOOK: query: create table vectortab2korc(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2korc
+PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vectortab2k
+PREHOOK: Output: default@vectortab2korc
+POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vectortab2k
+POSTHOOK: Output: default@vectortab2korc
+POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
+PREHOOK: query: explain
+select distinct s, t from vectortab2korc
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select distinct s, t from vectortab2korc
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: vectortab2korc
+            Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: s (type: string), t (type: tinyint)
+              outputColumnNames: s, t
+              Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                keys: s (type: string), t (type: tinyint)
+                mode: hash
+                outputColumnNames: _col0, _col1
+                Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: tinyint)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint)
+                  Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: string), KEY._col1 (type: tinyint)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1
+          Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: tinyint)
+            outputColumnNames: _col0, _col1
+            Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select distinct s, t from vectortab2korc
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vectortab2korc
+#### A masked pattern was here ####
+POSTHOOK: query: select distinct s, t from vectortab2korc
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vectortab2korc
+#### A masked pattern was here ####
+	NULL
+	-116
+	-114
+	-113
+	-108
+	-107
+	-104
+	-91
+	-87
+	-86
+	-85
+	-84
+	-83
+	-80
+	-75
+	-72
+	-66
+	-53
+	-46
+	-38
+	-36
+	-33
+	-29
+	-28
+	-25
+	-24
+	-19
+	-16
+	-14
+	-12
+	-10
+	-5
+	-4
+	-2
+	0
+	4
+	5
+	8
+	12
+	16
+	19
+	22
+	23
+	26
+	29
+	31
+	37
+	42
+	46
+	48
+	54
+	57
+	59
+	60
+	62
+	64
+	79
+	84
+	87
+	88
+	90
+	91
+	96
+	97
+	102
+	103
+	105
+	109
+	112
+	113
+	114
+	123
+american history	NULL
+american history	-127
+american history	-119
+american history	-116
+american history	-112
+american history	-110
+american history	-108
+american history	-105
+american history	-103
+american history	-101
+american history	-95
+american history	-94
+american history	-92
+american history	-90
+american history	-89
+american history	-87
+american history	-86
+american history	-81
+american history	-80
+american history	-73
+american history	-66
+american history	-60
+american history	-59
+american history	-55
+american history	-53
+american history	-50
+american history	-48
+american history	-47
+american history	-34
+american history	-30
+american history	-28
+american history	-27
+american history	-26
+american history	-19
+american history	-18
+american history	-17
+american history	-15
+american history	-11
+american history	-9
+american history	0
+american history	5
+american history	20
+american history	22
+american history	29
+american history	31
+american history	33
+american history	38
+american history	39
+american history	40
+american history	42
+american history	46
+american history	48
+american history	54
+american history	56
+american history	60
+american history	68
+american history	69
+american history	71
+american history	72
+american history	74
+american history	77
+american history	79
+american history	82
+american history	84
+american history	89
+american history	96
+american history	100
+american history	103
+american history	111
+american history	119
+american history	122
+american history	123
+biology	NULL
+biology	-127
+biology	-121
+biology	-120
+biology	-119
+biology	-104
+biology	-101
+biology	-98
+biology	-97
+biology	-96
+biology	-92
+biology	-90
+biology	-86
+biology	-85
+biology	-83
+biology	-82
+biology	-81
+biology	-79
+biology	-72
+biology	-67
+biology	-66
+biology	-59
+biology	-57
+biology	-54
+biology	-53
+biology	-49
+biology	-46
+biology	-44
+biology	-42
+biology	-40
+biology	-36
+biology	-35
+biology	-31
+biology	-20
+biology	-19
+biology	-16
+biology	-13
+biology	-7
+biology	-6
+biology	-5
+biology	0
+biology	2
+biology	3
+biology	4
+biology	5
+biology	6
+biology	7
+biology	8
+biology	12
+biology	18
+biology	27
+biology	30
+biology	34
+biology	35
+biology	36
+biology	41
+biology	47
+biology	51
+biology	52
+biology	54
+biology	55
+biology	57
+biology	64
+biology	83
+biology	86
+biology	89
+biology	90
+biology	91
+biology	100
+biology	111
+biology	113
+biology	114
+biology	115
+biology	117
+biology	120
+biology	124
+chemistry	NULL
+chemistry	-127
+chemistry	-125
+chemistry	-122
+chemistry	-117
+chemistry	-115
+chemistry	-110
+chemistry	-109
+chemistry	-108
+chemistry	-103
+chemistry	-101
+chemistry	-98
+chemistry	-95
+chemistry	-91
+chemistry	-90
+chemistry	-89
+chemistry	-81
+chemistry	-75
+chemistry	-68
+chemistry	-61
+chemistry	-59
+chemistry	-57
+chemistry	-56
+chemistry	-52
+chemistry	-46
+chemistry	-44
+chemistry	-40
+chemistry	-37
+chemistry	-33
+chemistry	-31
+chemistry	-30
+chemistry	-26
+chemistry	-23
+chemistry	-8
+chemistry	-3
+chemistry	-2
+chemistry	3
+chemistry	4
+chemistry	8
+chemistry	14
+chemistry	16
+chemistry	19
+chemistry	25
+chemistry	27
+chemistry	32
+chemistry	37
+chemistry	39
+chemistry	41
+chemistry	46
+chemistry	51
+chemistry	55
+chemistry	64
+chemistry	74
+chemistry	75
+chemistry	78
+chemistry	79
+chemistry	83
+chemistry	88
+chemistry	90
+chemistry	91
+chemistry	96
+chemistry	102
+chemistry	104
+chemistry	110
+chemistry	113
+chemistry	116
+chemistry	124
+chemistry	126
+debate	NULL
+debate	-127
+debate	-117
+debate	-114
+debate	-108
+debate	-106
+debate	-98
+debate	-96
+debate	-95
+debate	-88
+debate	-87
+debate	-86
+debate	-85
+debate	-82
+debate	-72
+debate	-70
+debate	-69
+debate	-67
+debate	-57
+debate	-52
+debate	-50
+debate	-47
+debate	-46
+debate	-41
+debate	-35
+debate	-32
+debate	-29
+debate	-28
+debate	-27
+debate	-26
+debate	-24
+debate	-15
+debate	-12
+debate	-3
+debate	5
+debate	17
+debate	19
+debate	20
+debate	26
+debate	34
+debate	36
+debate	41
+debate	42
+debate	52
+debate	54
+debate	56
+debate	63
+debate	67
+debate	68
+debate	69
+debate	70
+debate	73
+debate	75
+debate	77
+debate	85
+debate	87
+debate	89
+debate	90
+debate	93
+debate	94
+debate	113
+debate	124
+education	NULL
+education	-127
+education	-125
+education	-113
+education	-111
+education	-109
+education	-105
+education	-101
+education	-98
+education	-96
+education	-93
+education	-92
+education	-89
+education	-87
+education	-86
+education	-77
+education	-72
+education	-69
+education	-64
+education	-61
+education	-55
+education	-52
+education	-44
+education	-43
+education	-38
+education	-37
+education	-36
+education	-34
+education	-28
+education	-24
+education	-15
+education	-13
+education	-9
+education	4
+education	12
+education	17
+education	27
+education	33
+education	34
+education	35
+education	38
+education	41
+education	45
+education	46
+education	49
+education	51
+education	53
+education	59
+education	61
+education	66
+education	73
+education	75
+education	77
+education	81
+education	82
+education	85
+education	89
+education	95
+education	102
+education	103
+education	110
+education	111
+education	113
+education	114
+education	116
+education	119
+education	125
+forestry	NULL
+forestry	-126
+forestry	-118
+forestry	-116
+forestry	-109
+forestry	-105
+forestry	-98
+forestry	-95
+forestry	-94
+forestry	-83
+forestry	-81
+forestry	-80
+forestry	-79
+forestry	-77
+forestry	-74
+forestry	-73
+forestry	-68
+forestry	-58
+forestry	-51
+forestry	-50
+forestry	-49
+forestry	-42
+forestry	-34
+forestry	-23
+forestry	-12
+forestry	-11
+forestry	-10
+forestry	-8
+forestry	-3
+forestry	-1
+forestry	0
+forestry	7
+forestry	8
+forestry	11
+forestry	12
+forestry	19
+forestry	29
+forestry	31
+forestry	35
+forestry	37
+forestry	39
+forestry	40
+forestry	44
+forestry	45
+forestry	50
+forestry	51
+forestry	52
+forestry	55
+forestry	56
+forestry	71
+forestry	78
+forestry	79
+forestry	87
+forestry	90
+forestry	93
+forestry	94
+forestry	97
+forestry	102
+forestry	106
+forestry	107
+forestry	111
+forestry	115
+forestry	117
+forestry	120
+geology	NULL
+geology	-124
+geology	-117
+geology	-115
+geology	-112
+geology	-108
+geology	-106
+geology	-101
+geology	-100
+geology	-96
+geology	-94
+geology	-84
+geology	-82
+geology	-80
+geology	-79
+geology	-75
+geology	-72
+geology	-68
+geology	-59
+geology	-57
+geology	-54
+geology	-53
+geology	-50
+geology	-48
+geology	-35
+geology	-34
+geology	-32
+geology	-31
+geology	-30
+geology	-23
+geology	-21
+geology	-18
+geology	-16
+geology	1
+geology	4
+geology	5
+geology	6
+geology	9
+geology	14
+geology	18
+geology	21
+geology	23
+geology	26
+geology	28
+geology	31
+geology	33
+geology	37
+geology	38
+geology	49
+geology	58
+geology	72
+geology	82
+geology	84
+geology	86
+geology	89
+geology	92
+geology	93
+geology	95
+geology	100
+geology	101
+geology	102
+geology	116
+geology	121
+geology	124
+geology	127
+history	NULL
+history	-125
+history	-124
+history	-105
+history	-104
+history	-103
+history	-101
+history	-98
+history	-91
+history	-90
+history	-89
+history	-79
+history	-75
+history	-67
+history	-61
+history	-58
+history	-54
+history	-42
+history	-37
+history	-34
+history	-26
+history	-24
+history	-19
+history	-18
+history	-15
+history	-12
+history	-8
+history	-5
+history	-1
+history	1
+history	2
+history	7
+history	14
+history	18
+history	25
+history	50
+history	51
+history	52
+history	57
+history	58
+history	59
+history	61
+history	62
+history	71
+history	72
+history	73
+history	75
+history	78
+history	84
+history	92
+history	95
+history	98
+history	100
+history	108
+history	112
+history	113
+history	114
+history	126
+history	127
+industrial engineering	NULL
+industrial engineering	-124
+industrial engineering	-110
+industrial engineering	-101
+industrial engineering	-98
+industrial engineering	-96
+industrial engineering	-87
+industrial engineering	-85
+industrial engineering	-72
+industrial engineering	-68
+industrial engineering	-65
+industrial engineering	-58
+industrial engineering	-57
+industrial engineering	-53
+industrial engineering	-49
+industrial engineering	-47
+industrial engineering	-43
+industrial engineering	-38
+industrial engineering	-35
+industrial engineering	-31
+industrial engineering	-30
+industrial engineering	-28
+industrial engineering	-22
+industrial engineering	-11
+industrial engineering	-7
+industrial engineering	-6
+industrial engineering	-5
+industrial engineering	-1
+industrial engineering	3
+industrial engineering	7
+industrial engineering	27
+industrial engineering	29
+industrial engineering	32
+industrial engineering	33
+industrial engineering	36
+industrial engineering	42
+industrial engineering	43
+industrial engineering	48
+industrial engineering	54
+industrial engineering	58
+industrial engineering	59
+industrial engineering	70
+industrial engineering	73
+industrial engineering	78
+industrial engineering	79
+industrial engineering	83
+industrial engineering	95
+industrial engineering	96
+industrial engineering	98
+industrial engineering	99
+industrial engineering	102
+industrial engineering	104
+industrial engineering	105
+industrial engineering	106
+industrial engineering	113
+industrial engineering	126
+joggying	NULL
+joggying	-125
+joggying	-121
+joggying	-119
+joggying	-110
+joggying	-101
+joggying	-100
+joggying	-80
+joggying	-79
+joggying	-77
+joggying	-76
+joggying	-73
+joggying	-69
+joggying	-64
+joggying	-62
+joggying	-61
+joggying	-57
+joggying	-55
+joggying	-48
+joggying	-47
+joggying	-43
+joggying	-40
+joggying	-30
+joggying	-27
+joggying	-24
+joggying	-15
+joggying	-14
+joggying	-8
+joggying	-1
+joggying	13
+joggying	20
+joggying	25
+joggying	26
+joggying	27
+joggying	28
+joggying	37
+joggying	43
+joggying	46
+joggying	48
+joggying	49
+joggying	52
+joggying	57
+joggying	61
+joggying	62
+joggying	69
+joggying	70
+joggying	72
+joggying	74
+joggying	80
+joggying	85
+joggying	87
+joggying	92
+joggying	93
+joggying	94
+joggying	97
+joggying	99
+joggying	104
+joggying	105
+joggying	118
+joggying	119
+joggying	121
+joggying	123
+joggying	125
+kindergarten	NULL
+kindergarten	-126
+kindergarten	-113
+kindergarten	-106
+kindergarten	-98
+kindergarten	-95
+kindergarten	-92
+kindergarten	-79
+kindergarten	-78
+kindergarten	-75
+kindergarten	-74
+kindergarten	-69
+kindergarten	-60
+kindergarten	-59
+kindergarten	-57
+kindergarten	-54
+kindergarten	-42
+kindergarten	-40
+kindergarten	-26
+kindergarten	-18
+kindergarten	-8
+kindergarten	10
+kindergarten	16
+kindergarten	18
+kindergarten	19
+kindergarten	23
+kindergarten	29
+kindergarten	37
+kindergarten	46
+kindergarten	48
+kindergarten	51
+kindergarten	52
+kindergarten	55
+kindergarten	61
+kindergarten	66
+kindergarten	69
+kindergarten	82
+kindergarten	84
+kindergarten	85
+kindergarten	86
+kindergarten	90
+kindergarten	92
+kindergarten	96
+kindergarten	100
+kindergarten	101
+kindergarten	109
+kindergarten	111
+kindergarten	116
+kindergarten	118
+kindergarten	120
+kindergarten	122
+kindergarten	127
+linguistics	NULL
+linguistics	-127
+linguistics	-122
+linguistics	-113
+linguistics	-101
+linguistics	-90
+linguistics	-89
+linguistics	-87
+linguistics	-86
+linguistics	-78
+linguistics	-77
+linguistics	-73
+linguistics	-70
+linguistics	-69
+linguistics	-68
+linguistics	-67
+linguistics	-53
+linguistics	-52
+linguistics	-41
+linguistics	-34
+linguistics	-28
+linguistics	-22
+linguistics	-20
+linguistics	-17
+linguistics	-16
+linguistics	-14
+linguistics	-13
+linguistics	-12
+linguistics	-6
+linguistics	-4
+linguistics	-2
+linguistics	0
+linguistics	1
+linguistics	3
+linguistics	6
+linguistics	7
+linguistics	10
+linguistics	11
+linguistics	18
+linguistics	33
+linguistics	37
+linguistics	42
+linguistics	44
+linguistics	50
+linguistics	53
+linguistics	64
+linguistics	67
+linguistics	72
+linguistics	73
+linguistics	83
+linguistics	89
+linguistics	93
+linguistics	96
+linguistics	98
+linguistics	100
+linguistics	113
+linguistics	115
+linguistics	123
+linguistics
125 +linguistics 126 +mathematics NULL +mathematics -127 +mathematics -124 +mathematics -122 +mathematics -120 +mathematics -118 +mathematics -117 +mathematics -103 +mathematics -101 +mathematics -100 +mathematics -99 +mathematics -98 +mathematics -95 +mathematics -91 +mathematics -88 +mathematics -81 +mathematics -79 +mathematics -77 +mathematics -75 +mathematics -66 +mathematics -57 +mathematics -52 +mathematics -50 +mathematics -49 +mathematics -46 +mathematics -45 +mathematics -40 +mathematics -33 +mathematics -31 +mathematics -21 +mathematics -19 +mathematics -7 +mathematics 0 +mathematics 3 +mathematics 6 +mathematics 10 +mathematics 22 +mathematics 23 +mathematics 25 +mathematics 32 +mathematics 35 +mathematics 38 +mathematics 39 +mathematics 46 +mathematics 48 +mathematics 50 +mathematics 53 +mathematics 55 +mathematics 56 +mathematics 58 +mathematics 59 +mathematics 62 +mathematics 63 +mathematics 65 +mathematics 76 +mathematics 79 +mathematics 80 +mathematics 82 +mathematics 87 +mathematics 92 +mathematics 98 +mathematics 102 +mathematics 107 +mathematics 111 +mathematics 114 +nap time NULL +nap time -122 +nap time -119 +nap time -115 +nap time -113 +nap time -104 +nap time -102 +nap time -101 +nap time -91 +nap time -85 +nap time -71 +nap time -61 +nap time -54 +nap time -49 +nap time -45 +nap time -41 +nap time -31 +nap time -15 +nap time -6 +nap time -4 +nap time -2 +nap time 0 +nap time 3 +nap time 6 +nap time 16 +nap time 23 +nap time 27 +nap time 31 +nap time 35 +nap time 42 +nap time 51 +nap time 52 +nap time 56 +nap time 62 +nap time 69 +nap time 70 +nap time 73 +nap time 76 +nap time 80 +nap time 89 +nap time 90 +nap time 92 +nap time 93 +nap time 98 +nap time 103 +nap time 104 +nap time 105 +nap time 107 +nap time 108 +nap time 118 +opthamology NULL +opthamology -122 +opthamology -121 +opthamology -118 +opthamology -111 +opthamology -99 +opthamology -97 +opthamology -91 +opthamology -86 +opthamology -82 +opthamology -79 +opthamology -78 +opthamology -77 +opthamology -75 +opthamology -73 +opthamology -63 +opthamology -59 +opthamology -55 +opthamology -49 +opthamology -48 +opthamology -44 +opthamology -35 +opthamology -33 +opthamology -30 +opthamology -28 +opthamology -27 +opthamology -24 +opthamology -22 +opthamology -18 +opthamology -17 +opthamology -14 +opthamology -8 +opthamology -5 +opthamology 4 +opthamology 5 +opthamology 21 +opthamology 39 +opthamology 41 +opthamology 48 +opthamology 49 +opthamology 52 +opthamology 53 +opthamology 55 +opthamology 68 +opthamology 69 +opthamology 74 +opthamology 76 +opthamology 77 +opthamology 79 +opthamology 81 +opthamology 84 +opthamology 87 +opthamology 88 +opthamology 89 +opthamology 92 +opthamology 96 +opthamology 97 +opthamology 100 +opthamology 104 +opthamology 117 +opthamology 120 +opthamology 122 +opthamology 125 +opthamology 127 +philosophy NULL +philosophy -125 +philosophy -121 +philosophy -119 +philosophy -115 +philosophy -110 +philosophy -105 +philosophy -103 +philosophy -100 +philosophy -99 +philosophy -95 +philosophy -93 +philosophy -92 +philosophy -80 +philosophy -78 +philosophy -77 +philosophy -69 +philosophy -68 +philosophy -61 +philosophy -56 +philosophy -55 +philosophy -53 +philosophy -52 +philosophy -51 +philosophy -50 +philosophy -40 +philosophy -39 +philosophy -27 +philosophy -26 +philosophy -25 +philosophy -17 +philosophy -11 +philosophy 8 +philosophy 20 +philosophy 21 +philosophy 22 +philosophy 29 +philosophy 31 +philosophy 34 +philosophy 38 +philosophy 41 +philosophy 43 +philosophy 45 +philosophy 48 
+philosophy 64 +philosophy 67 +philosophy 68 +philosophy 73 +philosophy 83 +philosophy 96 +philosophy 98 +philosophy 104 +philosophy 108 +philosophy 117 +philosophy 118 +philosophy 120 +philosophy 123 +quiet hour NULL +quiet hour -127 +quiet hour -123 +quiet hour -121 +quiet hour -119 +quiet hour -114 +quiet hour -111 +quiet hour -105 +quiet hour -104 +quiet hour -88 +quiet hour -87 +quiet hour -76 +quiet hour -73 +quiet hour -68 +quiet hour -66 +quiet hour -65 +quiet hour -56 +quiet hour -55 +quiet hour -52 +quiet hour -50 +quiet hour -48 +quiet hour -45 +quiet hour -42 +quiet hour -41 +quiet hour -33 +quiet hour -31 +quiet hour -25 +quiet hour -14 +quiet hour -8 +quiet hour -1 +quiet hour 0 +quiet hour 6 +quiet hour 7 +quiet hour 8 +quiet hour 13 +quiet hour 21 +quiet hour 23 +quiet hour 29 +quiet hour 30 +quiet hour 33 +quiet hour 35 +quiet hour 38 +quiet hour 43 +quiet hour 58 +quiet hour 60 +quiet hour 66 +quiet hour 71 +quiet hour 74 +quiet hour 80 +quiet hour 82 +quiet hour 84 +quiet hour 93 +quiet hour 98 +quiet hour 110 +quiet hour 112 +quiet hour 115 +quiet hour 120 +quiet hour 121 +quiet hour 123 +religion NULL +religion -125 +religion -106 +religion -104 +religion -94 +religion -93 +religion -81 +religion -78 +religion -77 +religion -76 +religion -71 +religion -70 +religion -69 +religion -64 +religion -62 +religion -60 +religion -56 +religion -44 +religion -42 +religion -41 +religion -38 +religion -35 +religion -32 +religion -29 +religion -28 +religion -26 +religion -24 +religion -23 +religion -9 +religion -7 +religion -3 +religion 0 +religion 2 +religion 4 +religion 5 +religion 15 +religion 17 +religion 29 +religion 31 +religion 38 +religion 44 +religion 45 +religion 49 +religion 52 +religion 54 +religion 58 +religion 67 +religion 70 +religion 73 +religion 74 +religion 76 +religion 78 +religion 82 +religion 92 +religion 93 +religion 96 +religion 97 +religion 102 +religion 103 +religion 106 +religion 107 +religion 110 +religion 115 +religion 120 +religion 123 +religion 124 +study skills NULL +study skills -127 +study skills -126 +study skills -117 +study skills -107 +study skills -106 +study skills -100 +study skills -88 +study skills -86 +study skills -82 +study skills -81 +study skills -76 +study skills -73 +study skills -65 +study skills -52 +study skills -36 +study skills -33 +study skills -27 +study skills -26 +study skills -22 +study skills -17 +study skills -14 +study skills -13 +study skills -6 +study skills -5 +study skills -4 +study skills -3 +study skills -1 +study skills 2 +study skills 3 +study skills 18 +study skills 21 +study skills 23 +study skills 25 +study skills 28 +study skills 29 +study skills 30 +study skills 35 +study skills 39 +study skills 40 +study skills 47 +study skills 49 +study skills 50 +study skills 54 +study skills 55 +study skills 58 +study skills 62 +study skills 63 +study skills 66 +study skills 68 +study skills 72 +study skills 77 +study skills 80 +study skills 83 +study skills 92 +study skills 95 +study skills 96 +study skills 101 +study skills 106 +study skills 107 +study skills 110 +study skills 115 +study skills 119 +study skills 123 +topology NULL +topology -122 +topology -116 +topology -106 +topology -105 +topology -102 +topology -98 +topology -96 +topology -88 +topology -86 +topology -78 +topology -74 +topology -71 +topology -60 +topology -58 +topology -57 +topology -50 +topology -44 +topology -42 +topology -41 +topology -36 +topology -32 +topology -31 +topology -25 +topology -21 +topology -5 +topology -1 +topology 7 +topology 11 
+topology 13 +topology 14 +topology 18 +topology 26 +topology 30 +topology 38 +topology 41 +topology 42 +topology 47 +topology 50 +topology 52 +topology 54 +topology 55 +topology 58 +topology 59 +topology 61 +topology 63 +topology 67 +topology 69 +topology 71 +topology 80 +topology 81 +topology 83 +topology 86 +topology 87 +topology 94 +topology 105 +topology 107 +topology 119 +topology 121 +topology 127 +undecided NULL +undecided -120 +undecided -118 +undecided -117 +undecided -116 +undecided -115 +undecided -114 +undecided -112 +undecided -105 +undecided -104 +undecided -96 +undecided -93 +undecided -90 +undecided -84 +undecided -83 +undecided -78 +undecided -69 +undecided -62 +undecided -53 +undecided -52 +undecided -51 +undecided -43 +undecided -41 +undecided -29 +undecided -28 +undecided -23 +undecided -19 +undecided -13 +undecided -10 +undecided -8 +undecided 0 +undecided 1 +undecided 7 +undecided 11 +undecided 13 +undecided 14 +undecided 27 +undecided 30 +undecided 33 +undecided 37 +undecided 45 +undecided 47 +undecided 50 +undecided 51 +undecided 56 +undecided 69 +undecided 76 +undecided 95 +undecided 97 +undecided 98 +undecided 111 +undecided 114 +undecided 119 +undecided 123 +undecided 124 +values clariffication NULL +values clariffication -123 +values clariffication -119 +values clariffication -114 +values clariffication -109 +values clariffication -108 +values clariffication -107 +values clariffication -105 +values clariffication -100 +values clariffication -98 +values clariffication -97 +values clariffication -95 +values clariffication -92 +values clariffication -90 +values clariffication -81 +values clariffication -75 +values clariffication -70 +values clariffication -69 +values clariffication -67 +values clariffication -63 +values clariffication -62 +values clariffication -60 +values clariffication -55 +values clariffication -51 +values clariffication -50 +values clariffication -48 +values clariffication -46 +values clariffication -42 +values clariffication -40 +values clariffication -38 +values clariffication -37 +values clariffication -31 +values clariffication -28 +values clariffication -8 +values clariffication -6 +values clariffication -5 +values clariffication 4 +values clariffication 8 +values clariffication 9 +values clariffication 10 +values clariffication 12 +values clariffication 14 +values clariffication 15 +values clariffication 21 +values clariffication 23 +values clariffication 30 +values clariffication 32 +values clariffication 42 +values clariffication 50 +values clariffication 53 +values clariffication 56 +values clariffication 57 +values clariffication 62 +values clariffication 70 +values clariffication 74 +values clariffication 80 +values clariffication 85 +values clariffication 92 +values clariffication 93 +values clariffication 96 +values clariffication 97 +values clariffication 98 +values clariffication 108 +values clariffication 118 +values clariffication 120 +values clariffication 122 +values clariffication 124 +values clariffication 126 +values clariffication 127 +wind surfing NULL +wind surfing -124 +wind surfing -121 +wind surfing -117 +wind surfing -116 +wind surfing -113 +wind surfing -111 +wind surfing -104 +wind surfing -102 +wind surfing -99 +wind surfing -98 +wind surfing -96 +wind surfing -83 +wind surfing -80 +wind surfing -78 +wind surfing -71 +wind surfing -65 +wind surfing -60 +wind surfing -57 +wind surfing -56 +wind surfing -42 +wind surfing -39 +wind surfing -38 +wind surfing -34 +wind surfing -31 +wind surfing -30 +wind surfing -21 
+wind surfing -18 +wind surfing -14 +wind surfing -12 +wind surfing -8 +wind surfing -6 +wind surfing -2 +wind surfing 5 +wind surfing 11 +wind surfing 15 +wind surfing 16 +wind surfing 20 +wind surfing 21 +wind surfing 22 +wind surfing 26 +wind surfing 29 +wind surfing 33 +wind surfing 36 +wind surfing 37 +wind surfing 40 +wind surfing 41 +wind surfing 44 +wind surfing 45 +wind surfing 48 +wind surfing 52 +wind surfing 53 +wind surfing 54 +wind surfing 58 +wind surfing 59 +wind surfing 62 +wind surfing 63 +wind surfing 64 +wind surfing 66 +wind surfing 68 +wind surfing 71 +wind surfing 75 +wind surfing 78 +wind surfing 86 +wind surfing 88 +wind surfing 92 +wind surfing 96 +wind surfing 101 +wind surfing 102 +wind surfing 107 +wind surfing 108 +wind surfing 111 +wind surfing 112 +wind surfing 121 +xylophone band NULL +xylophone band -115 +xylophone band -112 +xylophone band -109 +xylophone band -104 +xylophone band -101 +xylophone band -98 +xylophone band -96 +xylophone band -94 +xylophone band -91 +xylophone band -78 +xylophone band -75 +xylophone band -72 +xylophone band -70 +xylophone band -68 +xylophone band -59 +xylophone band -46 +xylophone band -27 +xylophone band -26 +xylophone band -23 +xylophone band -15 +xylophone band -13 +xylophone band -10 +xylophone band -6 +xylophone band -3 +xylophone band -2 +xylophone band 0 +xylophone band 3 +xylophone band 6 +xylophone band 7 +xylophone band 9 +xylophone band 11 +xylophone band 14 +xylophone band 17 +xylophone band 19 +xylophone band 22 +xylophone band 23 +xylophone band 33 +xylophone band 36 +xylophone band 38 +xylophone band 40 +xylophone band 52 +xylophone band 60 +xylophone band 62 +xylophone band 77 +xylophone band 79 +xylophone band 84 +xylophone band 85 +xylophone band 88 +xylophone band 92 +xylophone band 94 +xylophone band 107 +xylophone band 108 +xylophone band 112 +xylophone band 118 +xylophone band 122 +xylophone band 123 +xylophone band 125 +xylophone band 127 +yard duty NULL +yard duty -127 +yard duty -115 +yard duty -114 +yard duty -109 +yard duty -103 +yard duty -102 +yard duty -100 +yard duty -98 +yard duty -93 +yard duty -91 +yard duty -88 +yard duty -86 +yard duty -85 +yard duty -82 +yard duty -76 +yard duty -62 +yard duty -61 +yard duty -57 +yard duty -53 +yard duty -51 +yard duty -50 +yard duty -49 +yard duty -48 +yard duty -46 +yard duty -45 +yard duty -44 +yard duty -33 +yard duty -28 +yard duty -27 +yard duty -21 +yard duty -19 +yard duty -17 +yard duty -15 +yard duty -1 +yard duty 8 +yard duty 9 +yard duty 10 +yard duty 15 +yard duty 18 +yard duty 22 +yard duty 25 +yard duty 28 +yard duty 30 +yard duty 34 +yard duty 42 +yard duty 48 +yard duty 53 +yard duty 55 +yard duty 57 +yard duty 60 +yard duty 64 +yard duty 65 +yard duty 78 +yard duty 86 +yard duty 90 +yard duty 102 +yard duty 105 +yard duty 110 +zync studies NULL +zync studies -127 +zync studies -117 +zync studies -116 +zync studies -111 +zync studies -105 +zync studies -103 +zync studies -102 +zync studies -94 +zync studies -90 +zync studies -83 +zync studies -79 +zync studies -78 +zync studies -68 +zync studies -61 +zync studies -58 +zync studies -57 +zync studies -54 +zync studies -52 +zync studies -50 +zync studies -45 +zync studies -40 +zync studies -39 +zync studies -38 +zync studies -37 +zync studies -36 +zync studies -35 +zync studies -34 +zync studies -33 +zync studies -32 +zync studies -27 +zync studies -26 +zync studies -21 +zync studies -20 +zync studies -12 +zync studies 1 +zync studies 7 +zync studies 9 +zync studies 11 +zync studies 15 
+zync studies 19
+zync studies 31
+zync studies 37
+zync studies 48
+zync studies 59
+zync studies 63
+zync studies 70
+zync studies 87
+zync studies 90
+zync studies 91
+zync studies 98
+zync studies 99
+zync studies 100
+zync studies 116
+zync studies 120
diff --git a/ql/src/test/results/clientpositive/vector_elt.q.out b/ql/src/test/results/clientpositive/vector_elt.q.out
index 55f63b7..d3cc179 100644
--- a/ql/src/test/results/clientpositive/vector_elt.q.out
+++ b/ql/src/test/results/clientpositive/vector_elt.q.out
@@ -104,13 +104,13 @@ STAGE PLANS:
         Select Operator
           expressions: 'defg' (type: string), 'cc' (type: string), 'abc' (type: string), '2' (type: string), '12345' (type: string), '123456789012' (type: string), '1.25' (type: string), '16.0' (type: string), null (type: void), null (type: void)
           outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
-          Statistics: Num rows: 12288 Data size: 10752000 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 12288 Data size: 8687616 Basic stats: COMPLETE Column stats: COMPLETE
           Limit
             Number of rows: 1
-            Statistics: Num rows: 1 Data size: 875 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 707 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 875 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 1 Data size: 707 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.TextInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
diff --git a/ql/src/test/results/clientpositive/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/vector_groupby_3.q.out
new file mode 100644
index 0000000..aff5082
--- /dev/null
+++ b/ql/src/test/results/clientpositive/vector_groupby_3.q.out
@@ -0,0 +1,1862 @@
+PREHOOK: query: create table vectortab2k(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: create table vectortab2k(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@vectortab2k
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@vectortab2k
+PREHOOK: query: create table vectortab2korc(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+STORED AS ORC
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@vectortab2korc
+POSTHOOK: query: create table vectortab2korc(
+    t tinyint,
+    si smallint,
+    i int,
+    b bigint,
+    f float,
+    d double,
+    dc decimal(38,18),
+    bo boolean,
+    s string,
+    s2 string,
+    ts timestamp,
+    ts2 timestamp,
+    dt date)
+STORED AS ORC
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@vectortab2korc
+PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vectortab2k
+PREHOOK: Output: default@vectortab2korc
+POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vectortab2k
+POSTHOOK: Output: default@vectortab2korc
+POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ]
+PREHOOK: query: explain
+select s, t, max(b) from vectortab2korc group by s, t
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select s, t, max(b) from vectortab2korc group by s, t
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: vectortab2korc
+            Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: s (type: string), t (type: tinyint), b (type: bigint)
+              outputColumnNames: s, t, b
+              Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: max(b)
+                keys: s (type: string), t (type: tinyint)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: tinyint)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: tinyint)
+                  Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col2 (type: bigint)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: max(VALUE._col0)
+          keys: KEY._col0 (type: string), KEY._col1 (type: tinyint)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: tinyint), _col2 (type: bigint)
+            outputColumnNames: _col0, _col1, _col2
+            Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: select s, t, max(b) from vectortab2korc group by s, t
+PREHOOK: type: QUERY
+PREHOOK: Input: default@vectortab2korc
+#### A masked pattern was here ####
+POSTHOOK: query: select s, t, max(b) from vectortab2korc group by s, t
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@vectortab2korc
+#### A masked pattern was here ####
+ NULL 2714
+ -116 8945302550165004288
+ -114 1312
+ -113 7569249672628789248
+ -108 8939431770838810624
+ -107 1906
+ -104 8268875586442256384
+ -91 3965
+ -87 7078641038157643776
+ -86 2862
+ -85 -7139677575412686848
+ -84 -7085247548404178944
+ -83 8900351886974279680
+ -80 -8938849835283677184
+ -75 NULL
+ -72 9149216169284091904
+ -66 3197
+ -53 -8619303037130301440
+ -46 -9080568167841226752
+ -38 2933
+ -36 -7512297136103800832
+ -33 1075
+ -29 8371939471056470016
+ -28 -7292078334519894016
+ -25 2262
+ -24 898
+ -19 9094945190752903168
+ -16 -7673901622181953536
+ -14 7983789401706094592
+ -12 -7413317118463164416
+ -10 8991442360387584000
+ -5 3534
+ -4 -9014145341570203648
+ -2 8759184090543857664
+ 0 8163948965373386752
+ 4 7062382339142156288
+ 5 -8703026916864802816
+ 8 2905
+ 12 2553
+ 16 8983857919580209152
+ 19 2201
+ 22 9191943992860327936
+ 23 NULL
+ 26 -7916510129632296960
+ 29 8555933456197828608
+ 31 2941
+ 37 8143462899383345152
+ 42 NULL
+ 46 3174
+ 48 -8566856504746352640
+ 54 -7242345057866285056
+ 57 8230371298967609344
+ 59 2494
+ 60 -6986178228432322560
+ 62 779
+ 64 7368920486374989824
+ 79 8736061027343859712
+ 84 NULL
+ 87 797
+ 88 8782900615468302336
+ 90 2977
+ 91 2193
+ 96 -7866079955473989632
+ 97 2017
+ 102 -7036607470351654912
+ 103 780
+ 105 -7511952204985049088
+ 109 1436
+ 112 -7063777488249085952
+ 113 8775009214012456960
+ 114 1145
+ 123 3625
+american history NULL 3555
+american history -127 1719
+american history -119 19
+american history -116 3812
+american history -112 7731443941834678272
+american history -110 -8989473881707921408
+american history -108 7359004378440146944
+american history -105 7870277756614623232
+american history -103 -6962271229404348416
+american history -101 7348598907182800896
+american history -95 NULL
+american history -94 9119046173224370176
+american history -92 -7612455481940246528
+american history -90 1321
+american history -89 -7571293705217687552
+american history -87 -7536330682873937920
+american history -86 -8731068123910987776
+american history -81 2700
+american history -80 485
+american history -73 2487
+american history -66 -8368487814665895936
+american history -60 1566
+american history -59 3949
+american history -55 8407869317250220032
+american history -53 1955
+american history -50 7857878068300898304
+american history -48 -8016589197379289088
+american history -47
-8566940231897874432 +american history -34 4018 +american history -30 -8789178184387641344 +american history -28 -7720966287634112512 +american history -27 NULL +american history -26 -7057750467944931328 +american history -19 78 +american history -18 -8659643752269242368 +american history -17 2968 +american history -15 9048002942653710336 +american history -11 8714829359200747520 +american history -9 -8503342882470019072 +american history 0 6928080429732536320 +american history 5 135 +american history 20 -7037375807670501376 +american history 22 8467976965865799680 +american history 29 -8243487285852766208 +american history 31 -8007017894942638080 +american history 33 9043089884440068096 +american history 38 7061809776248545280 +american history 39 7922443154272395264 +american history 40 2886 +american history 42 8190967051000659968 +american history 46 8374321007870836736 +american history 48 6962726713896484864 +american history 54 -7784419454650843136 +american history 56 1890 +american history 60 3512 +american history 68 -8067243114610532352 +american history 69 -7199983995864711168 +american history 71 8698055291501543424 +american history 72 8551446856960942080 +american history 74 9038087402564657152 +american history 77 1837 +american history 79 -7542857121910046720 +american history 82 3980 +american history 84 2105 +american history 89 8806507556248731648 +american history 96 823 +american history 100 8760285623204290560 +american history 103 -7868306678534193152 +american history 111 522 +american history 119 8571268359622172672 +american history 122 7497276415392407552 +american history 123 -7488345684795342848 +biology NULL 9116137265342169088 +biology -127 1270 +biology -121 8109381965028548608 +biology -120 8388363436324085760 +biology -119 7054938591408996352 +biology -104 -8683802826440105984 +biology -101 -7198372044947275776 +biology -98 -8940944155843461120 +biology -97 -7827420207675105280 +biology -96 NULL +biology -92 1142 +biology -90 7775034125776363520 +biology -86 3755 +biology -85 -6935548339131138048 +biology -83 7232273749940838400 +biology -82 -7040248820505149440 +biology -81 NULL +biology -79 3566 +biology -72 -7915999634274369536 +biology -67 8532016240026279936 +biology -66 -7623047151287754752 +biology -59 7534042483076857856 +biology -57 7049773031131283456 +biology -54 -7319315187617587200 +biology -53 -6962292590214234112 +biology -49 NULL +biology -46 2241 +biology -44 2680 +biology -42 2469 +biology -40 -7469660864676585472 +biology -36 1489 +biology -35 2285 +biology -31 -9032650742739836928 +biology -20 7130159794259353600 +biology -19 -7442593976514420736 +biology -16 3960 +biology -13 2463 +biology -7 -8665218198816497664 +biology -6 888 +biology -5 808 +biology 0 523 +biology 2 -8387347109404286976 +biology 3 -9075486079396069376 +biology 4 482 +biology 5 7376467688511455232 +biology 6 -7797149520019062784 +biology 7 454 +biology 8 950 +biology 12 2492 +biology 18 -7255010240787030016 +biology 27 -9049720998034137088 +biology 30 835 +biology 34 2517 +biology 35 8017403886247927808 +biology 36 2539 +biology 41 7052226236896256000 +biology 47 3974 +biology 51 3728 +biology 52 9084402694981533696 +biology 54 -7330203470474985472 +biology 55 1785 +biology 57 9083704659251798016 +biology 64 2400 +biology 83 -8172827216441573376 +biology 86 3199 +biology 89 7747874976739016704 +biology 90 -7598782894648565760 +biology 91 -8131997716860526592 +biology 100 -8782213262837530624 +biology 111 6933731240564056064 +biology 113 664 +biology 114 1343 
+biology 115 -7017212700635545600 +biology 117 120 +biology 120 3443 +biology 124 -6919476845891313664 +chemistry NULL 8854677881758162944 +chemistry -127 -7637755520917741568 +chemistry -125 3418 +chemistry -122 -8232763638546694144 +chemistry -117 8014986215157530624 +chemistry -115 3307 +chemistry -110 3430 +chemistry -109 -7624057992767782912 +chemistry -108 7255302164215013376 +chemistry -103 -8099313480512716800 +chemistry -101 2599 +chemistry -98 9005866015985713152 +chemistry -95 -7470307155642245120 +chemistry -91 9030480306789818368 +chemistry -90 1291 +chemistry -89 2968 +chemistry -81 -8664806103426252800 +chemistry -75 375 +chemistry -68 -8813211231120031744 +chemistry -61 -8021859935185928192 +chemistry -59 -7572262898020278272 +chemistry -57 -7362189611124563968 +chemistry -56 -7663293054873812992 +chemistry -52 4056 +chemistry -46 -7610137349734883328 +chemistry -44 7596563216912211968 +chemistry -40 4054 +chemistry -37 NULL +chemistry -33 7545689659010949120 +chemistry -31 -9080956291212132352 +chemistry -30 2001 +chemistry -26 -9178166810751909888 +chemistry -23 -8518060755719585792 +chemistry -8 8487573502287478784 +chemistry -3 3245 +chemistry -2 -7500200359698907136 +chemistry 3 2933 +chemistry 4 2138 +chemistry 8 873 +chemistry 14 7198687580227043328 +chemistry 16 7229607057201127424 +chemistry 19 NULL +chemistry 25 236 +chemistry 27 1826 +chemistry 32 1775 +chemistry 37 41 +chemistry 39 2663 +chemistry 41 8693036785094565888 +chemistry 46 1280 +chemistry 51 392 +chemistry 55 -8843859708698583040 +chemistry 64 9091085792947666944 +chemistry 74 837 +chemistry 75 3860 +chemistry 78 2476 +chemistry 79 8644602243484803072 +chemistry 83 1286 +chemistry 88 -8082793390939193344 +chemistry 90 7794244032613703680 +chemistry 91 3866 +chemistry 96 -7395343938785738752 +chemistry 102 -7907355742053883904 +chemistry 104 3609 +chemistry 110 NULL +chemistry 113 7149417430082027520 +chemistry 116 7768984605670604800 +chemistry 124 1914 +chemistry 126 9083076230151864320 +debate NULL 1777 +debate -127 8337549596011102208 +debate -117 -8269917980278980608 +debate -114 2085 +debate -108 2816 +debate -106 1948 +debate -98 8011602724663336960 +debate -96 8688483860094599168 +debate -95 1368 +debate -88 8345435427356090368 +debate -87 -8028910243475038208 +debate -86 1095 +debate -85 2341 +debate -82 NULL +debate -72 7720187583697502208 +debate -70 8928133990107881472 +debate -69 7948803266578161664 +debate -67 1521 +debate -57 -8581979259158929408 +debate -52 7947544013461512192 +debate -50 7061498706968428544 +debate -47 49 +debate -46 1234 +debate -41 -8856821118526734336 +debate -35 7062605127422894080 +debate -32 -7831320202242228224 +debate -29 -7476082621253402624 +debate -28 -8379109122834997248 +debate -27 3835 +debate -26 471 +debate -24 8554899472487596032 +debate -15 34 +debate -12 7682327310082531328 +debate -3 8519937082746634240 +debate 5 -8649711322250362880 +debate 17 8560526613401714688 +debate 19 8501910015960735744 +debate 20 -8832750849949892608 +debate 26 738 +debate 34 3060 +debate 36 7792036342592348160 +debate 41 7039820685967343616 +debate 42 8557218322962644992 +debate 52 -7637494527844343808 +debate 54 7534145866886782976 +debate 56 -7161165959057334272 +debate 63 1243 +debate 67 2692 +debate 68 8045070943673671680 +debate 69 -7822452149325094912 +debate 70 7534549597202194432 +debate 73 8372408423196270592 +debate 75 2463 +debate 77 1168 +debate 85 8822384228057604096 +debate 87 8146288732715196416 +debate 89 2089 +debate 90 9078604269481148416 +debate 93 455 
+debate 94 -7506254246954500096 +debate 113 1995 +debate 124 3691 +education NULL 2903 +education -127 -8302817097848307712 +education -125 2214 +education -113 -8142667274351345664 +education -111 7296164580491075584 +education -109 8150115791664340992 +education -105 1066 +education -101 7165364563962191872 +education -98 1341 +education -96 -8022573309127000064 +education -93 7343171468838567936 +education -92 2811 +education -89 1999 +education -87 345 +education -86 8473699639908261888 +education -77 9148071980848742400 +education -72 3021 +education -69 2805 +education -64 6974475559697768448 +education -61 7917494645725765632 +education -55 7989119273552158720 +education -52 9050032047355125760 +education -44 -8219876839318716416 +education -43 3911 +education -38 3749 +education -37 923 +education -36 694 +education -34 -7707242953271500800 +education -28 7735566678126616576 +education -24 1509 +education -15 -6960947572095770624 +education -13 695 +education -9 928 +education 4 -8034414142083170304 +education 12 8454154705460666368 +education 17 2232 +education 27 -8454143651040444416 +education 33 -8488247955875618816 +education 34 379 +education 35 7473537548003352576 +education 38 -8244116388227104768 +education 41 7584007864107778048 +education 45 238 +education 46 7892281003266408448 +education 49 8461498293348065280 +education 51 -7623359796281999360 +education 53 -7600138468036386816 +education 59 7432998950057975808 +education 61 NULL +education 66 7652123583449161728 +education 73 -7185369278665605120 +education 75 9054887854393950208 +education 77 3206 +education 81 -6970396058557005824 +education 82 1058 +education 85 7961909238130270208 +education 89 3409 +education 95 3764 +education 102 7375521127126089728 +education 103 7384150968511315968 +education 110 7452756603516190720 +education 111 3824 +education 113 8994608999945125888 +education 114 1846 +education 116 -8930307926221807616 +education 119 7548958830580563968 +education 125 8577096957495025664 +forestry NULL 9123116008004288512 +forestry -126 9086905513121890304 +forestry -118 8272001752345690112 +forestry -116 7997694023324975104 +forestry -109 275 +forestry -105 2724 +forestry -98 3089 +forestry -95 2485 +forestry -94 -8051871680800120832 +forestry -83 2752 +forestry -81 -8922409715403112448 +forestry -80 1165 +forestry -79 1937 +forestry -77 1247 +forestry -74 1643 +forestry -73 -7228589258642194432 +forestry -68 -6988970700649168896 +forestry -58 7989160253372817408 +forestry -51 -9002912355472736256 +forestry -50 2283 +forestry -49 417 +forestry -42 268 +forestry -34 2524 +forestry -23 7790728456522784768 +forestry -12 -8205148279289085952 +forestry -11 9194388393453060096 +forestry -10 3962 +forestry -8 7955126053367119872 +forestry -3 1165 +forestry -1 -9071565764086521856 +forestry 0 -7603467428164009984 +forestry 7 3118 +forestry 8 7299197687217856512 +forestry 11 3467 +forestry 12 8160662610166194176 +forestry 19 -8760655406971863040 +forestry 29 -7831595638727565312 +forestry 31 3663 +forestry 35 1371 +forestry 37 950 +forestry 39 6933451028794925056 +forestry 40 -8763062627136864256 +forestry 44 -8559252110266564608 +forestry 45 8553195689344991232 +forestry 50 615 +forestry 51 2580 +forestry 52 8514851182589771776 +forestry 55 -9091113592821972992 +forestry 56 7418271723644403712 +forestry 71 8779711700787298304 +forestry 78 -7845896959112658944 +forestry 79 -7818454479651135488 +forestry 87 1981 +forestry 90 8829545979081744384 +forestry 93 1422 +forestry 94 -8469607298426437632 +forestry 97 
-8845239510002753536 +forestry 102 255 +forestry 106 8570983266408103936 +forestry 107 999 +forestry 111 -7138415011665043456 +forestry 115 8573305425181941760 +forestry 117 -7120456708338688000 +forestry 120 7784169796350730240 +geology NULL 8171188598958407680 +geology -124 3190 +geology -117 3421 +geology -115 -8660149447361404928 +geology -112 1462 +geology -108 -8604758220106014720 +geology -106 -7661250850555633664 +geology -101 -7078068944081002496 +geology -100 3002 +geology -96 -6957946688477274112 +geology -94 268 +geology -84 -7511202710200885248 +geology -82 8817665768680906752 +geology -80 3446 +geology -79 1614 +geology -75 -7883252982752665600 +geology -72 8391785334471589888 +geology -68 -8203008052020879360 +geology -59 7621013099259527168 +geology -57 -7594824008626372608 +geology -54 -7328087811698909184 +geology -53 -6988811476286873600 +geology -50 8120593157178228736 +geology -48 1342 +geology -35 -7510418793070075904 +geology -34 8708845895460577280 +geology -32 7487538600082554880 +geology -31 871 +geology -30 -8127494999848919040 +geology -23 1537 +geology -21 91 +geology -18 7370803940448305152 +geology -16 8979012655944220672 +geology 1 -8870673219965001728 +geology 4 9020143715350814720 +geology 5 7784489776013295616 +geology 6 3203 +geology 9 -8379964450833367040 +geology 14 -8136227554401107968 +geology 18 3763 +geology 21 8850055384477401088 +geology 23 NULL +geology 26 -7145585429014888448 +geology 28 2183 +geology 31 1892 +geology 33 7689489436826804224 +geology 37 2762 +geology 38 -8395998375405912064 +geology 49 8854715632851345408 +geology 58 -8856151919723003904 +geology 72 2179 +geology 82 6967631925774639104 +geology 84 8316336224427483136 +geology 86 -9101953184875757568 +geology 89 1346 +geology 92 -7833618000492109824 +geology 93 NULL +geology 95 -7838598833900584960 +geology 100 7528074274555305984 +geology 101 7701723309715685376 +geology 102 -8297230235506343936 +geology 116 3073 +geology 121 213 +geology 124 7779486624537370624 +geology 127 -7703540456272994304 +history NULL 3231 +history -125 1856 +history -124 -8318886086186213376 +history -105 3701 +history -104 1769 +history -103 -7623405558242500608 +history -101 154 +history -98 2046 +history -91 1651 +history -90 65 +history -89 3418 +history -79 1796 +history -75 -8783777723063099392 +history -67 -7679894005808693248 +history -61 8325227661920133120 +history -58 8376440110255243264 +history -54 2393 +history -42 8987827141270880256 +history -37 3946 +history -34 -7192529627893858304 +history -26 NULL +history -24 3079 +history -19 1791 +history -18 3770 +history -15 2013 +history -12 154 +history -8 2187 +history -5 1667 +history -1 -8544299740525461504 +history 1 8637720762289659904 +history 2 7648729477297987584 +history 7 7647481735646363648 +history 14 6947488599548215296 +history 18 -7778829032042790912 +history 25 3866 +history 50 3874 +history 51 8135164922674872320 +history 52 -6921654334727036928 +history 57 3690 +history 58 3664 +history 59 7514552840617558016 +history 61 -8411282676082565120 +history 62 3024 +history 71 -8730803262481580032 +history 72 7099005292698550272 +history 73 2776 +history 75 -7547245548870025216 +history 78 -9102482277760983040 +history 84 -7162299524557471744 +history 92 921 +history 95 3769 +history 98 3588 +history 100 263 +history 108 -7800879252150779904 +history 112 -7419068456205385728 +history 113 3728 +history 114 289 +history 126 2810 +history 127 -7884460946615984128 +industrial engineering NULL 3060 +industrial engineering -124 3249 
+industrial engineering -110 2560 +industrial engineering -101 -7617860842651017216 +industrial engineering -98 8368012468775608320 +industrial engineering -96 688 +industrial engineering -87 7486884806277611520 +industrial engineering -85 -7512289590991544320 +industrial engineering -72 3208 +industrial engineering -68 -9206329156028112896 +industrial engineering -65 9112400579327483904 +industrial engineering -58 650 +industrial engineering -57 -8521578237232529408 +industrial engineering -53 68 +industrial engineering -49 181 +industrial engineering -47 2911 +industrial engineering -43 504 +industrial engineering -38 -8581765103969312768 +industrial engineering -35 1726 +industrial engineering -31 1520 +industrial engineering -30 3472 +industrial engineering -28 724 +industrial engineering -22 -7540104552219860992 +industrial engineering -11 -7802538500225777664 +industrial engineering -7 2420 +industrial engineering -6 9185458640237641728 +industrial engineering -5 2485 +industrial engineering -1 8087737899452432384 +industrial engineering 3 3682 +industrial engineering 7 7573530789362262016 +industrial engineering 27 -8445801063348281344 +industrial engineering 29 3365 +industrial engineering 32 364 +industrial engineering 33 -7240213957902663680 +industrial engineering 36 1158 +industrial engineering 42 2506 +industrial engineering 43 3725 +industrial engineering 48 355 +industrial engineering 54 691 +industrial engineering 58 8195103847607967744 +industrial engineering 59 8808467247666241536 +industrial engineering 70 9091082386452684800 +industrial engineering 73 -7011425384222244864 +industrial engineering 78 -9136398397785948160 +industrial engineering 79 -8948335470186373120 +industrial engineering 83 NULL +industrial engineering 95 8192304692696383488 +industrial engineering 96 -7910019233726242816 +industrial engineering 98 -7524170566881329152 +industrial engineering 99 -9215144824304721920 +industrial engineering 102 2900 +industrial engineering 104 -8875546987176206336 +industrial engineering 105 8071961599867387904 +industrial engineering 106 878 +industrial engineering 113 1862 +industrial engineering 126 -6968892545529896960 +joggying NULL -8877053610728161280 +joggying -125 7823874904139849728 +joggying -121 3103 +joggying -119 2189 +joggying -110 -8870186814744420352 +joggying -101 -8675661101615489024 +joggying -100 7080269176324218880 +joggying -80 8254763178969915392 +joggying -79 -7939634346485858304 +joggying -77 7626715182847090688 +joggying -76 NULL +joggying -73 8011181697250631680 +joggying -69 7678790769408172032 +joggying -64 2373 +joggying -62 -8425998949410889728 +joggying -61 8687042963221159936 +joggying -57 8936639033158410240 +joggying -55 8208354137450766336 +joggying -48 8351163199364390912 +joggying -47 NULL +joggying -43 8323460620425330688 +joggying -40 3781 +joggying -30 2217 +joggying -27 2790 +joggying -24 -7083646746411720704 +joggying -15 -8658387566611996672 +joggying -14 -8358130693961195520 +joggying -8 8723248113030782976 +joggying -1 7844258063629852672 +joggying 13 8525894870444638208 +joggying 20 425 +joggying 25 1556 +joggying 26 2325 +joggying 27 1290 +joggying 28 -8858063395050110976 +joggying 37 -7378096180613840896 +joggying 43 7260908278294560768 +joggying 46 8905330479248064512 +joggying 48 1337 +joggying 49 -8047774491688255488 +joggying 52 2803 +joggying 57 8183233196086214656 +joggying 61 3253 +joggying 62 -8359839265974165504 +joggying 69 8302473563519950848 +joggying 70 1965 +joggying 72 976 +joggying 74 
-7751265769984491520 +joggying 80 7454442625055145984 +joggying 85 7748799008146366464 +joggying 87 94 +joggying 92 7818464507324121088 +joggying 93 8416121695917498368 +joggying 94 7599019810193211392 +joggying 97 2565 +joggying 99 1863 +joggying 104 1864 +joggying 105 2002 +joggying 118 -8108693586698706944 +joggying 119 -7892780594910871552 +joggying 121 1987 +joggying 123 NULL +joggying 125 2842 +kindergarten NULL 6933001829416034304 +kindergarten -126 2509 +kindergarten -113 259 +kindergarten -106 -8103788088118018048 +kindergarten -98 982 +kindergarten -95 8983912573761167360 +kindergarten -92 NULL +kindergarten -79 -7751427073017544704 +kindergarten -78 7524958388842078208 +kindergarten -75 7017956982081404928 +kindergarten -74 -8632237187473088512 +kindergarten -69 1813 +kindergarten -60 7027529814236192768 +kindergarten -59 8991071342495531008 +kindergarten -57 -7949309059286163456 +kindergarten -54 8896237972875370496 +kindergarten -42 -7094827141662539776 +kindergarten -40 7084659344078970880 +kindergarten -26 7226360892091416576 +kindergarten -18 7696737688942567424 +kindergarten -8 -7420448501073051648 +kindergarten 10 3111 +kindergarten 16 7753882935005880320 +kindergarten 18 -7395553021620731904 +kindergarten 19 -8104684579106914304 +kindergarten 23 8871707618793996288 +kindergarten 29 3248 +kindergarten 37 3493 +kindergarten 46 958 +kindergarten 48 -8572949572756774912 +kindergarten 51 8543177193114779648 +kindergarten 52 8868529429494071296 +kindergarten 55 -7404057145074712576 +kindergarten 61 7710447533880614912 +kindergarten 66 2735 +kindergarten 69 73 +kindergarten 82 530 +kindergarten 84 7998357471114969088 +kindergarten 85 7926898770090491904 +kindergarten 86 NULL +kindergarten 90 8972161729142095872 +kindergarten 92 8716401555586727936 +kindergarten 96 -7429331808102899712 +kindergarten 100 108 +kindergarten 101 7166263463731421184 +kindergarten 109 2962 +kindergarten 111 2320 +kindergarten 116 9207927479837319168 +kindergarten 118 -7819437864839495680 +kindergarten 120 7779735136559579136 +kindergarten 122 -7079898537463537664 +kindergarten 127 2223 +linguistics NULL 8383159090746204160 +linguistics -127 -8896045754034978816 +linguistics -122 -7695491171376291840 +linguistics -113 7614435638888210432 +linguistics -101 -8017791189288869888 +linguistics -90 -7739424919198187520 +linguistics -89 8489735221193138176 +linguistics -87 2244 +linguistics -86 NULL +linguistics -78 8518454006987948032 +linguistics -77 7686992843032010752 +linguistics -73 -8916987977485312000 +linguistics -70 2277 +linguistics -69 -7104310188119834624 +linguistics -68 8184799300477943808 +linguistics -67 NULL +linguistics -53 NULL +linguistics -52 -8651641150831362048 +linguistics -41 1811 +linguistics -34 3958 +linguistics -28 7345991518378442752 +linguistics -22 8489584373231919104 +linguistics -20 7620183559667081216 +linguistics -17 9075404705968840704 +linguistics -16 2662 +linguistics -14 -9203804401302323200 +linguistics -13 7566273236152721408 +linguistics -12 NULL +linguistics -6 8145750910080745472 +linguistics -4 3789 +linguistics -2 -7501803640821456896 +linguistics 0 9023663198045544448 +linguistics 1 1386 +linguistics 3 7386087924003676160 +linguistics 6 8558000156325707776 +linguistics 7 9048297564833079296 +linguistics 10 2846 +linguistics 11 83 +linguistics 18 1261 +linguistics 33 1086 +linguistics 37 1777 +linguistics 42 9117063974299148288 +linguistics 44 9136548192574529536 +linguistics 50 9188173682239275008 +linguistics 53 1447 +linguistics 64 1704 +linguistics 67 
-7201085131997011968 +linguistics 72 204 +linguistics 73 2502 +linguistics 83 NULL +linguistics 89 8116738401948377088 +linguistics 93 -7879864376629567488 +linguistics 96 803 +linguistics 98 7898670840507031552 +linguistics 100 3622 +linguistics 113 7217123582035116032 +linguistics 115 -6920172215209426944 +linguistics 123 7762823913046556672 +linguistics 125 1074 +linguistics 126 9067985867711291392 +mathematics NULL 9001907486943993856 +mathematics -127 -7158472098920390656 +mathematics -124 8290014929764040704 +mathematics -122 -7453525026342617088 +mathematics -120 3322 +mathematics -118 -6997233584896229376 +mathematics -117 2786 +mathematics -103 658 +mathematics -101 -8756989568739835904 +mathematics -100 7662037650719850496 +mathematics -99 NULL +mathematics -98 -7425160895830573056 +mathematics -95 490 +mathematics -91 8223732800007864320 +mathematics -88 -7115054815375073280 +mathematics -81 7753359568986636288 +mathematics -79 8111757081791733760 +mathematics -77 7581614118458335232 +mathematics -75 -7221474017515347968 +mathematics -66 -7894382303337832448 +mathematics -57 8219326436390821888 +mathematics -52 8435912708683087872 +mathematics -50 7746402369011277824 +mathematics -49 8156018594610790400 +mathematics -46 8210813831744118784 +mathematics -45 7237310132329488384 +mathematics -40 -8518258741831680000 +mathematics -33 7461153404961128448 +mathematics -31 681 +mathematics -21 -7661192563533062144 +mathematics -19 3159 +mathematics -7 8396433451610652672 +mathematics 0 8282648443538710528 +mathematics 3 -8887058200926093312 +mathematics 6 1701 +mathematics 10 7259955893466931200 +mathematics 22 6934570741217755136 +mathematics 23 7271887863395459072 +mathematics 25 -7333362172439035904 +mathematics 32 2073 +mathematics 35 -7558524160894427136 +mathematics 38 -7557017910095650816 +mathematics 39 2579 +mathematics 46 -7759425383684849664 +mathematics 48 1366 +mathematics 50 3029 +mathematics 53 7549858023389003776 +mathematics 55 2227 +mathematics 56 898 +mathematics 58 3830 +mathematics 59 7487338208419823616 +mathematics 62 883 +mathematics 63 8156782979767238656 +mathematics 65 1648 +mathematics 76 8927691194719174656 +mathematics 79 7871189141676998656 +mathematics 80 NULL +mathematics 82 1093 +mathematics 87 3707 +mathematics 92 -8754992450211692544 +mathematics 98 2398 +mathematics 102 690 +mathematics 107 8391407951622815744 +mathematics 111 2607 +mathematics 114 3094 +nap time NULL -8430283518005846016 +nap time -122 8660248367767076864 +nap time -119 2715 +nap time -115 -7576194692683563008 +nap time -113 8451612303224520704 +nap time -104 -7709958788604936192 +nap time -102 -6938706403992854528 +nap time -101 2229 +nap time -91 85 +nap time -85 3932 +nap time -71 9136234417125007360 +nap time -61 1524 +nap time -54 -8562524688907485184 +nap time -49 8079573715140485120 +nap time -45 7310869618402910208 +nap time -41 7801697837312884736 +nap time -31 939 +nap time -15 7401968422230032384 +nap time -6 7843804446688264192 +nap time -4 736 +nap time -2 8697823501349609472 +nap time 0 1940 +nap time 3 9064847977742032896 +nap time 6 7871554728617025536 +nap time 16 NULL +nap time 23 7432428551399669760 +nap time 27 1454 +nap time 31 6964585306125008896 +nap time 35 2689 +nap time 42 8652485812846567424 +nap time 51 -7881262505761710080 +nap time 52 2463 +nap time 56 -8240034910581153792 +nap time 62 NULL +nap time 69 NULL +nap time 70 66 +nap time 73 7782245855193874432 +nap time 76 -8543982423727128576 +nap time 80 2647 +nap time 89 -7888051992910274560 +nap time 
90 7761834341179375616 +nap time 92 8540237852367446016 +nap time 93 -8203075743525806080 +nap time 98 7691062622443044864 +nap time 103 361 +nap time 104 7125231541858205696 +nap time 105 7045967493826387968 +nap time 107 9114850402293882880 +nap time 108 1189 +nap time 118 8910706980937261056 +opthamology NULL 8856674723376668672 +opthamology -122 9096395849845194752 +opthamology -121 3879 +opthamology -118 2072 +opthamology -111 8244041599171862528 +opthamology -99 -7875953567586451456 +opthamology -97 8752150411997356032 +opthamology -91 2255 +opthamology -86 -9066993118333706240 +opthamology -82 1606 +opthamology -79 -9117959922369060864 +opthamology -78 -7303847963918393344 +opthamology -77 8631515095562887168 +opthamology -75 8779073705407963136 +opthamology -73 908 +opthamology -63 470 +opthamology -59 3021 +opthamology -55 2177 +opthamology -49 8854495099223375872 +opthamology -48 -7877598807023386624 +opthamology -44 2675 +opthamology -35 2274 +opthamology -33 1613 +opthamology -30 8048726769133592576 +opthamology -28 8849475396952514560 +opthamology -27 3599 +opthamology -24 383 +opthamology -22 7718825401976684544 +opthamology -18 2393 +opthamology -17 -7629401308029976576 +opthamology -14 3235 +opthamology -8 7333512171174223872 +opthamology -5 7411793502161182720 +opthamology 4 9131533983989358592 +opthamology 5 130 +opthamology 21 7069729473166090240 +opthamology 39 2745 +opthamology 41 NULL +opthamology 48 3159 +opthamology 49 -7904188195431661568 +opthamology 52 -7081500255163727872 +opthamology 53 -7055760785575665664 +opthamology 55 2335 +opthamology 68 -8866442231663067136 +opthamology 69 2144 +opthamology 74 -7708932208121225216 +opthamology 76 -7647020450676146176 +opthamology 77 -6934304742087655424 +opthamology 79 1508 +opthamology 81 8920344895701393408 +opthamology 84 927 +opthamology 87 6924820982050758656 +opthamology 88 -8593419958317056000 +opthamology 89 -7978782649203228672 +opthamology 92 NULL +opthamology 96 -8418913260807217152 +opthamology 97 8935252708196999168 +opthamology 100 -7532751268425261056 +opthamology 104 1866 +opthamology 117 2835 +opthamology 120 -8340523561480437760 +opthamology 122 3462 +opthamology 125 965 +opthamology 127 412 +philosophy NULL 8759089349412847616 +philosophy -125 9199741683232399360 +philosophy -121 342 +philosophy -119 -8507279516485566464 +philosophy -115 1719 +philosophy -110 7471208109437304832 +philosophy -105 -7172594404186693632 +philosophy -103 8144552446127972352 +philosophy -100 -7603569103205916672 +philosophy -99 -8030058711611629568 +philosophy -95 3460 +philosophy -93 8720504651219001344 +philosophy -92 8649296591032172544 +philosophy -80 -7035132060308643840 +philosophy -78 1752 +philosophy -77 -7344947507044466688 +philosophy -69 2897 +philosophy -68 9185952983951343616 +philosophy -61 7271786885641666560 +philosophy -56 -7593363318079610880 +philosophy -55 3366 +philosophy -53 6987889924212203520 +philosophy -52 2824 +philosophy -51 2180 +philosophy -50 8875745082589929472 +philosophy -40 3478 +philosophy -39 1141 +philosophy -27 -8710298418608619520 +philosophy -26 -7344146703223496704 +philosophy -25 -7878145001776152576 +philosophy -17 -7515996202498473984 +philosophy -11 -7953426740065312768 +philosophy 8 -8088337436168830976 +philosophy 20 8290944180915871744 +philosophy 21 -7903158849011843072 +philosophy 22 7892026679115554816 +philosophy 29 2848 +philosophy 31 7659279803863146496 +philosophy 34 -7058986555327307776 +philosophy 38 7238339720750948352 +philosophy 41 8792059919353348096 +philosophy 
43 3555 +philosophy 45 8362046808797306880 +philosophy 48 6991316084916879360 +philosophy 64 535 +philosophy 67 2715 +philosophy 68 1693 +philosophy 73 8283099811330506752 +philosophy 83 281 +philosophy 96 8682955459667951616 +philosophy 98 8613562211893919744 +philosophy 104 3541 +philosophy 108 6969599299897163776 +philosophy 117 2855 +philosophy 118 -7356685674003021824 +philosophy 120 -8300526097982226432 +philosophy 123 2140 +quiet hour NULL 7874764415950176256 +quiet hour -127 1099 +quiet hour -123 8769199243315814400 +quiet hour -121 7031339012080549888 +quiet hour -119 7608447395949109248 +quiet hour -114 8419958579638157312 +quiet hour -111 8424515140664360960 +quiet hour -105 918 +quiet hour -104 -7037638331316469760 +quiet hour -88 2919 +quiet hour -87 9182828596851990528 +quiet hour -76 -7792903881635938304 +quiet hour -73 8793387410919038976 +quiet hour -68 6982145326341423104 +quiet hour -66 383 +quiet hour -65 NULL +quiet hour -56 3567 +quiet hour -55 8569030475428511744 +quiet hour -52 8201303040648052736 +quiet hour -50 7998687089080467456 +quiet hour -48 8398862954249560064 +quiet hour -45 7378993334503694336 +quiet hour -42 NULL +quiet hour -41 7231399302953377792 +quiet hour -33 7637152193832886272 +quiet hour -31 -7744462446680375296 +quiet hour -25 NULL +quiet hour -14 997 +quiet hour -8 -7329767178250018816 +quiet hour -1 9085434340468473856 +quiet hour 0 7450416810848313344 +quiet hour 6 8795069490394882048 +quiet hour 7 2131 +quiet hour 8 -7265998318110711808 +quiet hour 13 2560 +quiet hour 21 -8293833565967810560 +quiet hour 23 1880 +quiet hour 29 2323 +quiet hour 30 9062227900376203264 +quiet hour 33 7528211148397944832 +quiet hour 35 -7046180371529351168 +quiet hour 38 2725 +quiet hour 43 8069531888205086720 +quiet hour 58 2461 +quiet hour 60 NULL +quiet hour 66 3770 +quiet hour 71 500 +quiet hour 74 -7902517224300036096 +quiet hour 80 7006803044329021440 +quiet hour 82 8853989376829833216 +quiet hour 84 8920533610804609024 +quiet hour 93 -7873753603299540992 +quiet hour 98 -9008631121684832256 +quiet hour 110 2186 +quiet hour 112 7436133434239229952 +quiet hour 115 1641 +quiet hour 120 919 +quiet hour 121 1506 +quiet hour 123 7800332581637259264 +religion NULL 7295502697317097472 +religion -125 2106 +religion -106 NULL +religion -104 820 +religion -94 9174894805640142848 +religion -93 491 +religion -81 8463868417649524736 +religion -78 7497306924248834048 +religion -77 NULL +religion -76 -8959796625322680320 +religion -71 296 +religion -70 -7461750143936897024 +religion -69 -7433265617153343488 +religion -64 -8430370933326536704 +religion -62 913 +religion -60 7700734109530767360 +religion -56 782 +religion -44 -8807361476639629312 +religion -42 -9213132862973829120 +religion -41 7266437490436341760 +religion -38 -8140349174954893312 +religion -35 7295926343524163584 +religion -32 1537 +religion -29 8221561626658881536 +religion -28 -8857335871148171264 +religion -26 1039 +religion -24 2194 +religion -23 3183 +religion -9 -8696162322976997376 +religion -7 -6968771079156654080 +religion -3 203 +religion 0 8996824426131390464 +religion 2 8995562121346260992 +religion 4 2803 +religion 5 -7159700138947862528 +religion 15 4088 +religion 17 1780 +religion 29 -7612466483992051712 +religion 31 1021 +religion 38 1751 +religion 44 -8509547439040757760 +religion 45 -8070535484085895168 +religion 49 8836228556823977984 +religion 52 7250237407877382144 +religion 54 9211455920344088576 +religion 58 3467 +religion 67 8113585123802529792 +religion 70 7919597361814577152 
+religion 73 9053187076403060736 +religion 74 815 +religion 76 -7273694358642851840 +religion 78 -8051587217208967168 +religion 82 3119 +religion 92 2067 +religion 93 3848 +religion 96 -8317591428117274624 +religion 97 3456 +religion 102 -8471480409335513088 +religion 103 8815398225009967104 +religion 106 3058 +religion 107 3810 +religion 110 -7849504559236210688 +religion 115 -7712425776235274240 +religion 120 5 +religion 123 979 +religion 124 8899122608190930944 +study skills NULL 8201491077550874624 +study skills -127 -8559008501282832384 +study skills -126 3507 +study skills -117 -8400045653258444800 +study skills -107 8785153741735616512 +study skills -106 8002769767000145920 +study skills -100 -8962547695651323904 +study skills -88 2551 +study skills -86 3990 +study skills -82 NULL +study skills -81 612 +study skills -76 NULL +study skills -73 -8535957064499879936 +study skills -65 8332670681629106176 +study skills -52 3533 +study skills -36 -8485389240529354752 +study skills -33 -7213775605408178176 +study skills -27 961 +study skills -26 -9187662685618348032 +study skills -22 7204802700490858496 +study skills -17 3144 +study skills -14 3913 +study skills -13 7195454019231834112 +study skills -6 -7296096276653391872 +study skills -5 2412 +study skills -4 1094 +study skills -3 707 +study skills -1 7381659098423926784 +study skills 2 -7507578199583694848 +study skills 3 7291432593139507200 +study skills 18 743 +study skills 21 7274777328897802240 +study skills 23 8333523087360901120 +study skills 25 -8704234107608203264 +study skills 28 -8494118409594650624 +study skills 29 -7220731681653604352 +study skills 30 2637 +study skills 35 -7488415863027367936 +study skills 39 8897901899039473664 +study skills 40 3961 +study skills 47 -7049618574399692800 +study skills 49 -7326863346317598720 +study skills 50 7054271419461812224 +study skills 54 7128222874437238784 +study skills 55 1368 +study skills 58 -7030489936116252672 +study skills 62 8372588378498777088 +study skills 63 2512 +study skills 66 -7497303453253402624 +study skills 68 3725 +study skills 72 -8028275725610909696 +study skills 77 7354813692542304256 +study skills 80 -9078662294976061440 +study skills 83 -7779270198785875968 +study skills 92 3059 +study skills 95 7393308503950548992 +study skills 96 -8046189486447017984 +study skills 101 2295 +study skills 106 -8161047750470279168 +study skills 107 9132009829414584320 +study skills 110 -7998947380180819968 +study skills 115 7344029858387820544 +study skills 119 2264 +study skills 123 -7797151404935618560 +topology NULL 8639254009546055680 +topology -122 3941 +topology -116 -7964801953178091520 +topology -106 NULL +topology -105 -7824788571789279232 +topology -102 8783241818558193664 +topology -98 7212090742612467712 +topology -96 3568 +topology -88 -6975459232300236800 +topology -86 2515 +topology -78 7347732772348870656 +topology -74 8773222500321361920 +topology -71 4037 +topology -60 1493 +topology -58 2619 +topology -57 8895174927321243648 +topology -50 8168742078705262592 +topology -44 -8664374244449050624 +topology -42 2434 +topology -41 1153 +topology -36 3588 +topology -32 -8923529803981905920 +topology -31 -7330413050756235264 +topology -25 244 +topology -21 -8615168537390571520 +topology -5 1439 +topology -1 7391208370547269632 +topology 7 7339426767877390336 +topology 11 3333 +topology 13 8411494452500930560 +topology 14 8367680396909404160 +topology 18 -9189155542884474880 +topology 26 2218 +topology 30 2348 +topology 38 7410096605330227200 +topology 41 2608 
+topology 42 8547243497773457408 +topology 47 7705445437881278464 +topology 50 NULL +topology 52 1899 +topology 54 7091300332052062208 +topology 55 -6935038507792801792 +topology 58 -7616522969329262592 +topology 59 7212016545671348224 +topology 61 1914 +topology 63 -8961059046745669632 +topology 67 3680 +topology 69 2358 +topology 71 812 +topology 80 4075 +topology 81 22 +topology 83 1477 +topology 86 294 +topology 87 8900180888218329088 +topology 94 8146492373537660928 +topology 105 462 +topology 107 112 +topology 119 8525336514806317056 +topology 121 7933040277013962752 +topology 127 -8835408234247168000 +undecided NULL 8811693967537774592 +undecided -120 7242751359672631296 +undecided -118 4078 +undecided -117 7086206629592252416 +undecided -116 7013693841855774720 +undecided -115 NULL +undecided -114 8761174805938331648 +undecided -112 367 +undecided -105 4030 +undecided -104 8625937019655200768 +undecided -96 7697541332524376064 +undecided -93 -7777884099756122112 +undecided -90 NULL +undecided -84 -9109392978217484288 +undecided -83 7175638927948562432 +undecided -78 -9157613004431998976 +undecided -69 3907 +undecided -62 -8914039133569400832 +undecided -53 1827 +undecided -52 3071 +undecided -51 481 +undecided -43 7491898395977523200 +undecided -41 7690986322714066944 +undecided -29 1774 +undecided -28 4024 +undecided -23 1371 +undecided -19 -8523434203900674048 +undecided -13 3823 +undecided -10 NULL +undecided -8 8470141334513098752 +undecided 0 7944741547145502720 +undecided 1 1671 +undecided 7 8269730157217062912 +undecided 11 9089435102788009984 +undecided 13 -7700203302632210432 +undecided 14 9190466190353661952 +undecided 27 3622 +undecided 30 1910 +undecided 33 -8465978403747037184 +undecided 37 -7507424948896415744 +undecided 45 -8335810316927213568 +undecided 47 -8503573595507761152 +undecided 50 7570474972934488064 +undecided 51 1545 +undecided 56 8583916402383601664 +undecided 69 -7140008543769042944 +undecided 76 8169878743136043008 +undecided 95 2821 +undecided 97 -7451660755269853184 +undecided 98 443 +undecided 111 9180098147855769600 +undecided 114 3006 +undecided 119 168 +undecided 123 1187 +undecided 124 7888238729321496576 +values clariffication NULL -7456869587112255488 +values clariffication -123 7412924364686458880 +values clariffication -119 NULL +values clariffication -114 7235109456886816768 +values clariffication -109 7909645665163804672 +values clariffication -108 -7246123871306244096 +values clariffication -107 8295110846998233088 +values clariffication -105 7555301305375858688 +values clariffication -100 1053 +values clariffication -98 -8013397854633648128 +values clariffication -97 8579974641030365184 +values clariffication -95 -8996954350906294272 +values clariffication -92 2011 +values clariffication -90 1423 +values clariffication -81 6996686091335884800 +values clariffication -75 2569 +values clariffication -70 3083 +values clariffication -69 -7329807949048193024 +values clariffication -67 169 +values clariffication -63 -6947955278050181120 +values clariffication -62 2712 +values clariffication -60 2971 +values clariffication -55 3904 +values clariffication -51 3637 +values clariffication -50 8199513544090730496 +values clariffication -48 7370078518278397952 +values clariffication -46 NULL +values clariffication -42 -7611584069753552896 +values clariffication -40 2942 +values clariffication -38 2991 +values clariffication -37 7581052107944361984 +values clariffication -31 763 +values clariffication -28 3352 +values clariffication -8 
8148211378319933440 +values clariffication -6 2056 +values clariffication -5 3031 +values clariffication 4 489 +values clariffication 8 -8426531414463545344 +values clariffication 9 -8603817012434198528 +values clariffication 10 7186401810812059648 +values clariffication 12 8190539859890601984 +values clariffication 14 -8147405381260345344 +values clariffication 15 7575087487730196480 +values clariffication 21 NULL +values clariffication 23 3255 +values clariffication 30 9107991000536498176 +values clariffication 32 NULL +values clariffication 42 2020 +values clariffication 50 1983 +values clariffication 53 3887 +values clariffication 56 3608 +values clariffication 57 -7669169138124275712 +values clariffication 62 3910 +values clariffication 70 1287 +values clariffication 74 2533 +values clariffication 80 7220131672176058368 +values clariffication 85 2986 +values clariffication 92 -8490382417169408000 +values clariffication 93 7741854854673367040 +values clariffication 96 2625 +values clariffication 97 3858 +values clariffication 98 8515682078777081856 +values clariffication 108 1115 +values clariffication 118 -9022154842129547264 +values clariffication 120 6927260280037097472 +values clariffication 122 -9084940280061485056 +values clariffication 124 -9210275791460499456 +values clariffication 126 3673 +values clariffication 127 -8347088645602050048 +wind surfing NULL 7961515985722605568 +wind surfing -124 1048 +wind surfing -121 7164349895861829632 +wind surfing -117 1177 +wind surfing -116 1941 +wind surfing -113 4020 +wind surfing -111 8415171956168417280 +wind surfing -104 8666178591503564800 +wind surfing -102 1518 +wind surfing -99 7265141874315517952 +wind surfing -98 8457906374051020800 +wind surfing -96 3147 +wind surfing -83 1509 +wind surfing -80 3630 +wind surfing -78 -9088239683374350336 +wind surfing -71 1990 +wind surfing -65 1495 +wind surfing -60 601 +wind surfing -57 6926925215281774592 +wind surfing -56 7107604675626008576 +wind surfing -42 2619 +wind surfing -39 3554 +wind surfing -38 7410872053689794560 +wind surfing -34 -7535857766791577600 +wind surfing -31 -9105701280936501248 +wind surfing -30 3945 +wind surfing -21 NULL +wind surfing -18 -8117838333114212352 +wind surfing -14 7130306447560826880 +wind surfing -12 3974 +wind surfing -8 9016280522993975296 +wind surfing -6 -6951350560260784128 +wind surfing -2 661 +wind surfing 5 3286 +wind surfing 11 -9149719074367946752 +wind surfing 15 3940 +wind surfing 16 2193 +wind surfing 20 3703 +wind surfing 21 8004633750273925120 +wind surfing 22 -7055619148037554176 +wind surfing 26 -8746702976270385152 +wind surfing 29 1157 +wind surfing 33 1055 +wind surfing 36 1559 +wind surfing 37 7593521922173419520 +wind surfing 40 -8831091081349758976 +wind surfing 41 3722 +wind surfing 44 3462 +wind surfing 45 8287522765741301760 +wind surfing 48 7414865343000322048 +wind surfing 52 8524940073536954368 +wind surfing 53 1856 +wind surfing 54 8100036735858401280 +wind surfing 58 7517159036469575680 +wind surfing 59 3613 +wind surfing 62 -8330233444291084288 +wind surfing 63 501 +wind surfing 64 7490717730239250432 +wind surfing 66 -7840338174858199040 +wind surfing 68 8455496814886002688 +wind surfing 71 9085381906890203136 +wind surfing 75 8825059717746376704 +wind surfing 78 6923604860394528768 +wind surfing 86 9169248521377374208 +wind surfing 88 1132 +wind surfing 92 1530 +wind surfing 96 8142241016679735296 +wind surfing 101 -7848043121524228096 +wind surfing 102 2649 +wind surfing 107 1032 +wind surfing 108 8508401924853850112 
+wind surfing 111 8000440057238052864 +wind surfing 112 7077311975029555200 +wind surfing 121 3579 +xylophone band NULL 3401 +xylophone band -115 -7000925438663041024 +xylophone band -112 -8705403811649355776 +xylophone band -109 -7804116532814151680 +xylophone band -104 3510 +xylophone band -101 2786 +xylophone band -98 7060236714847412224 +xylophone band -96 8984935029383389184 +xylophone band -94 -7380731416973295616 +xylophone band -91 914 +xylophone band -78 8222714144797368320 +xylophone band -75 3084 +xylophone band -72 -7642381493746483200 +xylophone band -70 -8270479187688816640 +xylophone band -68 -8768744394742235136 +xylophone band -59 1845 +xylophone band -46 -7052619594823221248 +xylophone band -27 618 +xylophone band -26 3397 +xylophone band -23 8837420822750314496 +xylophone band -15 -9051477157204770816 +xylophone band -13 8708232769657815040 +xylophone band -10 -7404052043914526720 +xylophone band -6 8410599906334097408 +xylophone band -3 2803 +xylophone band -2 -7989766326847807488 +xylophone band 0 2988 +xylophone band 3 8286706213485297664 +xylophone band 6 1676 +xylophone band 7 7065344324692443136 +xylophone band 9 -7262798781688651776 +xylophone band 11 7500716020874674176 +xylophone band 14 -7881351200983613440 +xylophone band 17 7778936842502275072 +xylophone band 19 2984 +xylophone band 22 -7551394356730339328 +xylophone band 23 7675009476762918912 +xylophone band 33 311 +xylophone band 36 -6917607783359897600 +xylophone band 38 3067 +xylophone band 40 8129551357032259584 +xylophone band 52 -7687052294777208832 +xylophone band 60 1541 +xylophone band 62 8365058996333953024 +xylophone band 77 9209153648361848832 +xylophone band 79 1307 +xylophone band 84 1398 +xylophone band 85 7592440105065308160 +xylophone band 88 584 +xylophone band 92 -7399631791131074560 +xylophone band 94 8643198489997254656 +xylophone band 107 -8357136656913686528 +xylophone band 108 2108 +xylophone band 112 7921639119138070528 +xylophone band 118 1728 +xylophone band 122 9207107990561972224 +xylophone band 123 8677794924343164928 +xylophone band 125 263 +xylophone band 127 NULL +yard duty NULL 1972 +yard duty -127 2719 +yard duty -115 8279056098670198784 +yard duty -114 3747 +yard duty -109 2791 +yard duty -103 -8659692318743314432 +yard duty -102 NULL +yard duty -100 7309156463509061632 +yard duty -98 7492436934952574976 +yard duty -93 NULL +yard duty -91 -7094189393339678720 +yard duty -88 8145745969573666816 +yard duty -86 3606 +yard duty -85 NULL +yard duty -82 -8191825921746305024 +yard duty -76 3563 +yard duty -62 71 +yard duty -61 -7541860097718902784 +yard duty -57 8059284960252731392 +yard duty -53 2843 +yard duty -51 -7686220526274502656 +yard duty -50 8220104397160169472 +yard duty -49 NULL +yard duty -48 9040958359122640896 +yard duty -46 8641221723991433216 +yard duty -45 9139805788041134080 +yard duty -44 1065 +yard duty -33 1075 +yard duty -28 -7444070205513138176 +yard duty -27 7199539820886958080 +yard duty -21 2092 +yard duty -19 8656571350884048896 +yard duty -17 NULL +yard duty -15 1481 +yard duty -1 -9012093603044245504 +yard duty 8 -7194281951646187520 +yard duty 9 -8345065519816695808 +yard duty 10 3212 +yard duty 15 8945004737083555840 +yard duty 18 3901 +yard duty 22 -7109790267244814336 +yard duty 25 1030 +yard duty 28 7220581538170413056 +yard duty 30 7394967727502467072 +yard duty 34 -7858505678035951616 +yard duty 42 3724 +yard duty 48 8780196485890555904 +yard duty 53 590 +yard duty 55 8417381121663746048 +yard duty 57 677 +yard duty 60 1781 +yard duty 64 
-8084716955963252736 +yard duty 65 735 +yard duty 78 -8275337702906757120 +yard duty 86 2688 +yard duty 90 -7692192232238678016 +yard duty 102 2004 +yard duty 105 80 +yard duty 110 7153922334283776000 +zync studies NULL 8962097525980225536 +zync studies -127 3079 +zync studies -117 -9219066990552760320 +zync studies -116 1542 +zync studies -111 1681 +zync studies -105 8665969966920990720 +zync studies -103 8509508263705477120 +zync studies -102 8900545829211299840 +zync studies -94 2563 +zync studies -90 8160569434550403072 +zync studies -83 8213810702473183232 +zync studies -79 -7152177800841502720 +zync studies -78 8235179243092090880 +zync studies -68 1870 +zync studies -61 -6974654664348033024 +zync studies -58 -8859107121649893376 +zync studies -57 3213 +zync studies -54 8555948987770511360 +zync studies -52 3462 +zync studies -50 2016 +zync studies -45 -8300764106868350976 +zync studies -40 3244 +zync studies -39 8099215208813903872 +zync studies -38 2514 +zync studies -37 2412 +zync studies -36 412 +zync studies -35 9000633029632499712 +zync studies -34 579 +zync studies -33 -8086577583338061824 +zync studies -32 2540 +zync studies -27 7936149988210212864 +zync studies -26 -8453491903284994048 +zync studies -21 296 +zync studies -20 9104574294205636608 +zync studies -12 2325 +zync studies 1 3841 +zync studies 7 1127 +zync studies 9 2878 +zync studies 11 279 +zync studies 15 8731960288562044928 +zync studies 19 -8714995808835444736 +zync studies 31 2306 +zync studies 37 2979 +zync studies 48 2205 +zync studies 59 NULL +zync studies 63 -8877431933441327104 +zync studies 70 8294315622451740672 +zync studies 87 -9203942396257984512 +zync studies 90 2850 +zync studies 91 8091421389575282688 +zync studies 98 1252 +zync studies 99 1608 +zync studies 100 8536948829863198720 +zync studies 116 9073672806863790080 +zync studies 120 7845953007588401152 diff --git a/ql/src/test/results/clientpositive/vector_orderby_5.q.out b/ql/src/test/results/clientpositive/vector_orderby_5.q.out new file mode 100644 index 0000000..aa43500 --- /dev/null +++ b/ql/src/test/results/clientpositive/vector_orderby_5.q.out @@ -0,0 +1,195 @@ +PREHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) 
+STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2korc +PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2k +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2k +POSTHOOK: Output: default@vectortab2korc +POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +PREHOOK: query: explain +select bo, max(b) from vectortab2korc group by bo order by bo desc +PREHOOK: type: QUERY +POSTHOOK: query: explain +select bo, max(b) from vectortab2korc group by bo order by bo desc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: vectortab2korc + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: bo (type: boolean), b (type: bigint) + outputColumnNames: bo, b + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: max(b) + keys: bo (type: boolean) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: boolean) + sort order: + + Map-reduce partition columns: _col0 (type: boolean) + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column 
stats: NONE + value expressions: _col1 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: max(VALUE._col0) + keys: KEY._col0 (type: boolean) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: boolean), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: boolean) + sort order: - + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: boolean), VALUE._col0 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1000 Data size: 459356 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select bo, max(b) from vectortab2korc group by bo order by bo desc +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### +POSTHOOK: query: select bo, max(b) from vectortab2korc group by bo order by bo desc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2korc +#### A masked pattern was here #### +true 9211455920344088576 +false 9209153648361848832 +NULL 9180098147855769600 diff --git a/ql/src/test/results/clientpositive/vector_varchar_4.q.out b/ql/src/test/results/clientpositive/vector_varchar_4.q.out new file mode 100644 index 0000000..f7c9cd0 --- /dev/null +++ b/ql/src/test/results/clientpositive/vector_varchar_4.q.out @@ -0,0 +1,202 @@ +PREHOOK: query: drop table if exists vectortab2k +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists vectortab2k +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists vectortab2korc +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists vectortab2korc +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: create table vectortab2k( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: 
LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@vectortab2k +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/vectortab2k' OVERWRITE INTO TABLE vectortab2k +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@vectortab2k +PREHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: create table vectortab2korc( + t tinyint, + si smallint, + i int, + b bigint, + f float, + d double, + dc decimal(38,18), + bo boolean, + s string, + s2 string, + ts timestamp, + ts2 timestamp, + dt date) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@vectortab2korc +PREHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +PREHOOK: type: QUERY +PREHOOK: Input: default@vectortab2k +PREHOOK: Output: default@vectortab2korc +POSTHOOK: query: INSERT INTO TABLE vectortab2korc SELECT * FROM vectortab2k +POSTHOOK: type: QUERY +POSTHOOK: Input: default@vectortab2k +POSTHOOK: Output: default@vectortab2korc +POSTHOOK: Lineage: vectortab2korc.b SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:b, type:bigint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.bo SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:bo, type:boolean, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.d SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:d, type:double, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dc SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dc, type:decimal(38,18), comment:null), ] +POSTHOOK: Lineage: vectortab2korc.dt SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:dt, type:date, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.f SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:f, type:float, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.i SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:i, type:int, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.s2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:s2, type:string, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.si SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:si, type:smallint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.t SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:t, type:tinyint, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: vectortab2korc.ts2 SIMPLE [(vectortab2k)vectortab2k.FieldSchema(name:ts2, type:timestamp, comment:null), ] +PREHOOK: query: drop table if exists varchar_lazy_binary_columnar +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists varchar_lazy_binary_columnar +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table varchar_lazy_binary_columnar(vt varchar(10), vsi varchar(10), vi varchar(20), vb varchar(30), vf varchar(20),vd varchar(20),vs varchar(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: 
default@varchar_lazy_binary_columnar +POSTHOOK: query: create table varchar_lazy_binary_columnar(vt varchar(10), vsi varchar(10), vi varchar(20), vb varchar(30), vf varchar(20),vd varchar(20),vs varchar(50)) row format serde 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@varchar_lazy_binary_columnar +PREHOOK: query: explain +insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table varchar_lazy_binary_columnar select t, si, i, b, f, d, s from vectortab2korc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-2 depends on stages: Stage-0 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: vectortab2korc + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: CAST( t AS varchar(10)) (type: varchar(10)), CAST( si AS varchar(10)) (type: varchar(10)), CAST( i AS varchar(20)) (type: varchar(20)), CAST( b AS varchar(30)) (type: varchar(30)), CAST( f AS varchar(20)) (type: varchar(20)), CAST( d AS varchar(20)) (type: varchar(20)), CAST( s AS varchar(50)) (type: varchar(50)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2000 Data size: 918712 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe + name: default.varchar_lazy_binary_columnar + Execution mode: vectorized + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe + name: default.varchar_lazy_binary_columnar + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-3 + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-5 + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + diff --git a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out index 1c77c39..1c774af 100644 --- a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out +++ b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out @@ -1,31 +1,31 @@ -PREHOOK: query: drop table char_2 +PREHOOK: query: drop table varchar_2 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table char_2 +POSTHOOK: query: drop 
table varchar_2 POSTHOOK: type: DROPTABLE -PREHOOK: query: create table char_2 ( +PREHOOK: query: create table varchar_2 ( key varchar(10), value varchar(20) ) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@char_2 -POSTHOOK: query: create table char_2 ( +PREHOOK: Output: default@varchar_2 +POSTHOOK: query: create table varchar_2 ( key varchar(10), value varchar(20) ) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@char_2 -PREHOOK: query: insert overwrite table char_2 select * from src +POSTHOOK: Output: default@varchar_2 +PREHOOK: query: insert overwrite table varchar_2 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@char_2 -POSTHOOK: query: insert overwrite table char_2 select * from src +PREHOOK: Output: default@varchar_2 +POSTHOOK: query: insert overwrite table varchar_2 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@char_2 -POSTHOOK: Lineage: char_2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -POSTHOOK: Lineage: char_2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Output: default@varchar_2 +POSTHOOK: Lineage: varchar_2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: varchar_2.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] PREHOOK: query: select key, value from src order by key asc @@ -46,12 +46,12 @@ POSTHOOK: Input: default@src 10 val_10 100 val_100 PREHOOK: query: explain select key, value -from char_2 +from varchar_2 order by key asc limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain select key, value -from char_2 +from varchar_2 order by key asc limit 5 POSTHOOK: type: QUERY @@ -64,7 +64,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: char_2 + alias: varchar_2 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: varchar(10)), value (type: varchar(20)) @@ -100,19 +100,19 @@ STAGE PLANS: PREHOOK: query: -- should match the query from src select key, value -from char_2 +from varchar_2 order by key asc limit 5 PREHOOK: type: QUERY -PREHOOK: Input: default@char_2 +PREHOOK: Input: default@varchar_2 #### A masked pattern was here #### POSTHOOK: query: -- should match the query from src select key, value -from char_2 +from varchar_2 order by key asc limit 5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@char_2 +POSTHOOK: Input: default@varchar_2 #### A masked pattern was here #### 0 val_0 0 val_0 @@ -139,12 +139,12 @@ POSTHOOK: Input: default@src 97 val_97 96 val_96 PREHOOK: query: explain select key, value -from char_2 +from varchar_2 order by key desc limit 5 PREHOOK: type: QUERY POSTHOOK: query: explain select key, value -from char_2 +from varchar_2 order by key desc limit 5 POSTHOOK: type: QUERY @@ -157,7 +157,7 @@ STAGE PLANS: Map Reduce Map Operator Tree: TableScan - alias: char_2 + alias: varchar_2 Statistics: Num rows: 500 Data size: 88000 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: varchar(10)), value (type: varchar(20)) @@ -193,30 +193,125 @@ STAGE PLANS: PREHOOK: query: -- should match the query from src select key, value -from char_2 +from varchar_2 order by key desc limit 5 PREHOOK: type: QUERY -PREHOOK: Input: default@char_2 +PREHOOK: Input: default@varchar_2 #### A masked 
pattern was here #### POSTHOOK: query: -- should match the query from src select key, value -from char_2 +from varchar_2 order by key desc limit 5 POSTHOOK: type: QUERY -POSTHOOK: Input: default@char_2 +POSTHOOK: Input: default@varchar_2 #### A masked pattern was here #### 98 val_98 98 val_98 97 val_97 97 val_97 96 val_96 -PREHOOK: query: drop table char_2 +PREHOOK: query: drop table varchar_2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@char_2 -PREHOOK: Output: default@char_2 -POSTHOOK: query: drop table char_2 +PREHOOK: Input: default@varchar_2 +PREHOOK: Output: default@varchar_2 +POSTHOOK: query: drop table varchar_2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@char_2 -POSTHOOK: Output: default@char_2 +POSTHOOK: Input: default@varchar_2 +POSTHOOK: Output: default@varchar_2 +PREHOOK: query: -- Implicit conversion. Occurs in reduce-side under Tez. +create table varchar_3 ( + field varchar(25) +) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@varchar_3 +POSTHOOK: query: -- Implicit conversion. Occurs in reduce-side under Tez. +create table varchar_3 ( + field varchar(25) +) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@varchar_3 +PREHOOK: query: explain +insert into table varchar_3 select cint from alltypesorc limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert into table varchar_3 select cint from alltypesorc limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cint (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: int) + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: CAST( _col0 AS varchar(25)) (type: varchar(25)) + outputColumnNames: _col0 + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 300 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.varchar_3 + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.varchar_3 + + Stage: Stage-2 + Stats-Aggr Operator + +PREHOOK: query: insert into table varchar_3 select cint from alltypesorc limit 10 +PREHOOK: type: QUERY +PREHOOK: 
Input: default@alltypesorc +PREHOOK: Output: default@varchar_3 +POSTHOOK: query: insert into table varchar_3 select cint from alltypesorc limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +POSTHOOK: Output: default@varchar_3 +POSTHOOK: Lineage: varchar_3.field EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] +PREHOOK: query: drop table varchar_3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@varchar_3 +PREHOOK: Output: default@varchar_3 +POSTHOOK: query: drop table varchar_3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@varchar_3 +POSTHOOK: Output: default@varchar_3 diff --git a/ql/src/test/results/clientpositive/vectorization_0.q.out b/ql/src/test/results/clientpositive/vectorization_0.q.out index 2aeaa13..d50899e 100644 --- a/ql/src/test/results/clientpositive/vectorization_0.q.out +++ b/ql/src/test/results/clientpositive/vectorization_0.q.out @@ -1,3 +1,1087 @@ +PREHOOK: query: -- Use ORDER BY clauses to generate 2 stages. +EXPLAIN +SELECT MIN(ctinyint) as c1, + MAX(ctinyint), + COUNT(ctinyint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: -- Use ORDER BY clauses to generate 2 stages. +EXPLAIN +SELECT MIN(ctinyint) as c1, + MAX(ctinyint), + COUNT(ctinyint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: ctinyint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ctinyint), max(ctinyint), count(ctinyint), count() + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: tinyint), _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: tinyint), _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: tinyint) + sort order: + + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: tinyint), _col2 (type: bigint), _col3 (type: bigint) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: tinyint), VALUE._col0 (type: 
tinyint), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT MIN(ctinyint) as c1, + MAX(ctinyint), + COUNT(ctinyint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT MIN(ctinyint) as c1, + MAX(ctinyint), + COUNT(ctinyint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-64 62 9173 12288 +PREHOOK: query: EXPLAIN +SELECT SUM(ctinyint) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT SUM(ctinyint) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: ctinyint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(ctinyint) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor 
Tree: + ListSink + +PREHOOK: query: SELECT SUM(ctinyint) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(ctinyint) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-39856 +PREHOOK: query: EXPLAIN +SELECT + avg(ctinyint) as c1, + variance(ctinyint), + var_pop(ctinyint), + var_samp(ctinyint), + std(ctinyint), + stddev(ctinyint), + stddev_pop(ctinyint), + stddev_samp(ctinyint) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + avg(ctinyint) as c1, + variance(ctinyint), + var_pop(ctinyint), + var_samp(ctinyint), + std(ctinyint), + stddev(ctinyint), + stddev_pop(ctinyint), + stddev_samp(ctinyint) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctinyint (type: tinyint) + outputColumnNames: ctinyint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(ctinyint), variance(ctinyint), var_pop(ctinyint), var_samp(ctinyint), std(ctinyint), stddev(ctinyint), stddev_pop(ctinyint), stddev_samp(ctinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: struct<count:bigint,sum:double,input:tinyint>), _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: struct<count:bigint,sum:double,variance:double>), _col3 (type: struct<count:bigint,sum:double,variance:double>), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: struct<count:bigint,sum:double,variance:double>), _col7 (type: struct<count:bigint,sum:double,variance:double>) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + Reduce Operator Tree: + Select
Operator + expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + avg(ctinyint) as c1, + variance(ctinyint), + var_pop(ctinyint), + var_samp(ctinyint), + std(ctinyint), + stddev(ctinyint), + stddev_pop(ctinyint), + stddev_samp(ctinyint) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + avg(ctinyint) as c1, + variance(ctinyint), + var_pop(ctinyint), + var_samp(ctinyint), + std(ctinyint), + stddev(ctinyint), + stddev_pop(ctinyint), + stddev_samp(ctinyint) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-4.344925324321378 1158.3003004768184 1158.3003004768184 1158.4265870337827 34.033811136527426 34.033811136527426 34.033811136527426 34.03566639620536 +PREHOOK: query: EXPLAIN +SELECT MIN(cbigint) as c1, + MAX(cbigint), + COUNT(cbigint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT MIN(cbigint) as c1, + MAX(cbigint), + COUNT(cbigint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint) + outputColumnNames: cbigint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(cbigint), max(cbigint), count(cbigint), count() + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint), _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output 
format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint), _col2 (type: bigint), _col3 (type: bigint) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: bigint), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 32 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT MIN(cbigint) as c1, + MAX(cbigint), + COUNT(cbigint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT MIN(cbigint) as c1, + MAX(cbigint), + COUNT(cbigint), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-2147311592 2145498388 9173 12288 +PREHOOK: query: EXPLAIN +SELECT SUM(cbigint) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT SUM(cbigint) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint) + outputColumnNames: cbigint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(cbigint) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: bigint) + sort order: + + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + 
Select Operator + expressions: KEY.reducesinkkey0 (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT SUM(cbigint) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(cbigint) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-1698460028409 +PREHOOK: query: EXPLAIN +SELECT + avg(cbigint) as c1, + variance(cbigint), + var_pop(cbigint), + var_samp(cbigint), + std(cbigint), + stddev(cbigint), + stddev_pop(cbigint), + stddev_samp(cbigint) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + avg(cbigint) as c1, + variance(cbigint), + var_pop(cbigint), + var_samp(cbigint), + std(cbigint), + stddev(cbigint), + stddev_pop(cbigint), + stddev_samp(cbigint) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint) + outputColumnNames: cbigint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(cbigint), variance(cbigint), var_pop(cbigint), var_samp(cbigint), std(cbigint), stddev(cbigint), stddev_pop(cbigint), stddev_samp(cbigint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct), _col3 (type: struct), _col4 (type: struct), _col5 (type: struct), _col6 (type: struct), _col7 (type: struct) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + avg(cbigint) as c1, + variance(cbigint), + var_pop(cbigint), + var_samp(cbigint), + std(cbigint), + stddev(cbigint), + stddev_pop(cbigint), + stddev_samp(cbigint) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + avg(cbigint) as c1, + variance(cbigint), + var_pop(cbigint), + var_samp(cbigint), + std(cbigint), + stddev(cbigint), + stddev_pop(cbigint), + stddev_samp(cbigint) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-1.8515862077935246E8 2.07689300543081907E18 2.07689300543081907E18 2.07711944383088768E18 1.441142951074188E9 1.441142951074188E9 1.441142951074188E9 1.4412215110214279E9 +PREHOOK: query: EXPLAIN +SELECT MIN(cfloat) as c1, + MAX(cfloat), + COUNT(cfloat), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT MIN(cfloat) as c1, + MAX(cfloat), + COUNT(cfloat), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cfloat (type: float) + outputColumnNames: cfloat + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(cfloat), max(cfloat), count(cfloat), count() + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: float), _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), 
count(VALUE._col2), count(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: float), _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: float) + sort order: + + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: float), _col2 (type: bigint), _col3 (type: bigint) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: float), VALUE._col0 (type: float), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT MIN(cfloat) as c1, + MAX(cfloat), + COUNT(cfloat), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT MIN(cfloat) as c1, + MAX(cfloat), + COUNT(cfloat), + COUNT(*) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-64.0 79.553 9173 12288 +PREHOOK: query: EXPLAIN +SELECT SUM(cfloat) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT SUM(cfloat) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cfloat (type: float) + outputColumnNames: cfloat + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(cfloat) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: double) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: 
COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT SUM(cfloat) as c1 +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT SUM(cfloat) as c1 +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-39479.635992884636 +PREHOOK: query: EXPLAIN +SELECT + avg(cfloat) as c1, + variance(cfloat), + var_pop(cfloat), + var_samp(cfloat), + std(cfloat), + stddev(cfloat), + stddev_pop(cfloat), + stddev_samp(cfloat) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT + avg(cfloat) as c1, + variance(cfloat), + var_pop(cfloat), + var_samp(cfloat), + std(cfloat), + stddev(cfloat), + stddev_pop(cfloat), + stddev_samp(cfloat) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cfloat (type: float) + outputColumnNames: cfloat + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(cfloat), variance(cfloat), var_pop(cfloat), var_samp(cfloat), std(cfloat), stddev(cfloat), stddev_pop(cfloat), stddev_samp(cfloat) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct), _col3 (type: struct), _col4 (type: struct), _col5 (type: struct), _col6 (type: struct), _col7 (type: struct) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Select 
Operator + expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: double) + sort order: + + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: double), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + avg(cfloat) as c1, + variance(cfloat), + var_pop(cfloat), + var_samp(cfloat), + std(cfloat), + stddev(cfloat), + stddev_pop(cfloat), + stddev_samp(cfloat) +FROM alltypesorc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + avg(cfloat) as c1, + variance(cfloat), + var_pop(cfloat), + var_samp(cfloat), + std(cfloat), + stddev(cfloat), + stddev_pop(cfloat), + stddev_samp(cfloat) +FROM alltypesorc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc +#### A masked pattern was here #### +-4.303895780321011 1163.8972588604984 1163.8972588604984 1164.0241556397025 34.115938487171924 34.115938487171924 34.115938487171924 34.11779822379666 +WARNING: Comparing a bigint and a double may result in a loss of precision. 
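A quick consistency check on the cfloat aggregates just above (a sanity check against the golden row, not part of the test output): with n = COUNT(cfloat) = 9173, the two variance flavors differ only by Bessel's correction, var_samp = var_pop * n/(n-1) = 1163.8972588604984 * 9173/9172 ≈ 1164.0241556397, matching the var_samp column, while std, stddev and stddev_pop all print sqrt(var_pop) = sqrt(1163.8972588604984) ≈ 34.1159384872, since those three are aliases for the population standard deviation.

The WARNING above is emitted for the next query's predicate, where cbigint < cdouble promotes the bigint operand to double. A double carries a 53-bit significand, so distinct bigints beyond 2^53 can collide after promotion; a minimal HiveQL illustration (hypothetical, not taken from the test suite):

SELECT CAST(9007199254740993 AS DOUBLE) = CAST(9007199254740992 AS DOUBLE);
-- true: both literals round to 2^53 = 9007199254740992 when widened to double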
+PREHOOK: query: EXPLAIN +SELECT AVG(cbigint), + (-(AVG(cbigint))), + (-6432 + AVG(cbigint)), + STDDEV_POP(cbigint), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))), + VAR_SAMP(cbigint), + (-((-6432 + AVG(cbigint)))), + (-6432 + (-((-6432 + AVG(cbigint))))), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))), + COUNT(*), + SUM(cfloat), + (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)), + (-(VAR_SAMP(cbigint))), + ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))), + MIN(ctinyint), + (-(MIN(ctinyint))) +FROM alltypesorc +WHERE (((cstring2 LIKE '%b%') + OR ((79.553 != cint) + OR (cbigint < cdouble))) + OR ((ctinyint >= csmallint) + AND ((cboolean2 = 1) + AND (3569 = ctinyint)))) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT AVG(cbigint), + (-(AVG(cbigint))), + (-6432 + AVG(cbigint)), + STDDEV_POP(cbigint), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) + (-6432 + AVG(cbigint))), + VAR_SAMP(cbigint), + (-((-6432 + AVG(cbigint)))), + (-6432 + (-((-6432 + AVG(cbigint))))), + (-((-6432 + AVG(cbigint)))), + ((-((-6432 + AVG(cbigint)))) / (-((-6432 + AVG(cbigint))))), + COUNT(*), + SUM(cfloat), + (VAR_SAMP(cbigint) % STDDEV_POP(cbigint)), + (-(VAR_SAMP(cbigint))), + ((-((-6432 + AVG(cbigint)))) * (-(AVG(cbigint)))), + MIN(ctinyint), + (-(MIN(ctinyint))) +FROM alltypesorc +WHERE (((cstring2 LIKE '%b%') + OR ((79.553 != cint) + OR (cbigint < cdouble))) + OR ((ctinyint >= csmallint) + AND ((cboolean2 = 1) + AND (3569 = ctinyint)))) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((cstring2 like '%b%') or ((79.553 <> cint) or (cbigint < cdouble))) (type: boolean) + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint) + outputColumnNames: cbigint, cfloat, ctinyint + Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(cbigint), stddev_pop(cbigint), var_samp(cbigint), count(), sum(cfloat), min(ctinyint) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: struct), _col1 (type: struct), _col2 (type: struct), _col3 (type: bigint), _col4 (type: double), _col5 (type: tinyint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), stddev_pop(VALUE._col1), var_samp(VALUE._col2), count(VALUE._col3), sum(VALUE._col4), min(VALUE._col5) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double), (- _col0) (type: double), (-6432 + _col0) (type: double), _col1 (type: double), (- (-6432 + _col0)) (type: double), ((- (-6432 + _col0)) + (-6432 + _col0)) (type: double), _col2 (type: double), (- (-6432 + _col0)) (type: double), (-6432 + (- (-6432 + _col0))) (type: double), (- 
(-6432 + _col0)) (type: double), ((- (-6432 + _col0)) / (- (-6432 + _col0))) (type: double), _col3 (type: bigint), _col4 (type: double), (_col2 % _col1) (type: double), (- _col2) (type: double), ((- (-6432 + _col0)) * (- _col0)) (type: double), _col5 (type: tinyint), (- _col5) (type: tinyint) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 44 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + WARNING: Comparing a bigint and a double may result in a loss of precision. PREHOOK: query: SELECT AVG(cbigint), (-(AVG(cbigint))), diff --git a/ql/src/test/results/clientpositive/vectorization_limit.q.out b/ql/src/test/results/clientpositive/vectorization_limit.q.out index d67d559..5a31b9e 100644 --- a/ql/src/test/results/clientpositive/vectorization_limit.q.out +++ b/ql/src/test/results/clientpositive/vectorization_limit.q.out @@ -433,9 +433,11 @@ STAGE PLANS: PREHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limit 0 PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### POSTHOOK: query: select ctinyint,cdouble from alltypesorc order by ctinyint limit 0 POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### PREHOOK: query: -- 2MR (applied to last RS) explain diff --git a/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out index ef30a0c..bf4230f 100644 --- a/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out +++ b/ql/src/test/results/clientpositive/vectorized_date_funcs.q.out @@ -972,3 +972,111 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_udf_flight_orc #### A masked pattern was here #### 2009-07-30 +PREHOOK: query: EXPLAIN SELECT + min(fl_date) AS c1, + max(fl_date), + count(fl_date), + count(*) +FROM date_udf_flight_orc +ORDER BY c1 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT + min(fl_date) AS c1, + max(fl_date), + count(fl_date), + count(*) +FROM date_udf_flight_orc +ORDER BY c1 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: date_udf_flight_orc + Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: fl_date (type: date) + outputColumnNames: fl_date + Statistics: Num rows: 137 Data size: 13152 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(fl_date), max(fl_date), count(fl_date), count() + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: date), _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reduce 
Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: date), _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: date) + sort order: + + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: date), VALUE._col0 (type: date), VALUE._col1 (type: bigint), VALUE._col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 128 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + min(fl_date) AS c1, + max(fl_date), + count(fl_date), + count(*) +FROM date_udf_flight_orc +ORDER BY c1 +PREHOOK: type: QUERY +PREHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +POSTHOOK: query: SELECT + min(fl_date) AS c1, + max(fl_date), + count(fl_date), + count(*) +FROM date_udf_flight_orc +ORDER BY c1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@date_udf_flight_orc +#### A masked pattern was here #### +2010-10-20 2010-10-31 137 137 diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out index 89ea70d..efde414 100644 --- a/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out +++ b/ql/src/test/results/clientpositive/vectorized_timestamp_funcs.q.out @@ -1,10 +1,16 @@ PREHOOK: query: -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. +-- Turning on vectorization has been temporarily moved after filling the test table +-- due to bug HIVE-8197. + CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@alltypesorc_string POSTHOOK: query: -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. +-- Turning on vectorization has been temporarily moved after filling the test table +-- due to bug HIVE-8197. 
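The HIVE-8197 note refers to statement ordering inside the .q script itself: enabling vectorized execution before the ORC table was populated triggered the bug, so the SET statement now follows the load. A rough HiveQL sketch of the ordering the comment implies (the INSERT below is a guess at the script's shape, not copied from it):

-- populate the ORC table first, with vectorization still off
CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC;
INSERT OVERWRITE TABLE alltypesorc_string
  SELECT ctimestamp1, CAST(ctimestamp1 AS STRING) FROM alltypesorc LIMIT 40;  -- hypothetical fill
-- only then turn vectorization on for the queries under test
SET hive.vectorized.execution.enabled = true;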
+ CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC POSTHOOK: type: CREATETABLE @@ -513,25 +519,25 @@ STAGE PLANS: Map Operator Tree: TableScan alias: alltypesorc_wrong - Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: to_unix_timestamp(stimestamp1) (type: bigint), year(stimestamp1) (type: int), month(stimestamp1) (type: int), day(stimestamp1) (type: int), dayofmonth(stimestamp1) (type: int), weekofyear(stimestamp1) (type: int), hour(stimestamp1) (type: int), minute(stimestamp1) (type: int), second(stimestamp1) (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: bigint) sort order: + - Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: int), _col4 (type: int), _col5 (type: int), _col6 (type: int), _col7 (type: int), _col8 (type: int) Execution mode: vectorized Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: bigint), VALUE._col0 (type: int), VALUE._col1 (type: int), VALUE._col2 (type: int), VALUE._col3 (type: int), VALUE._col4 (type: int), VALUE._col5 (type: int), VALUE._col6 (type: int), VALUE._col7 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 - Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 103 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 294 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -576,3 +582,254 @@ POSTHOOK: Input: default@alltypesorc_wrong NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +PREHOOK: query: EXPLAIN SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc_string + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctimestamp1 (type: timestamp) + outputColumnNames: ctimestamp1 + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: min(ctimestamp1), max(ctimestamp1), count(ctimestamp1), count() + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 96 Basic 
stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: bigint), _col3 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: timestamp), _col1 (type: timestamp), _col2 (type: bigint), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +POSTHOOK: query: SELECT + min(ctimestamp1), + max(ctimestamp1), + count(ctimestamp1), + count(*) +FROM alltypesorc_string +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +1969-12-31 23:59:44.088 1970-01-01 00:00:15.007 39 40 +PREHOOK: query: -- SUM of timestamps are not vectorized reduce-side because they produce a double instead of a long (HIVE-8211)... +EXPLAIN SELECT + sum(ctimestamp1) +FROM alltypesorc_string +PREHOOK: type: QUERY +POSTHOOK: query: -- SUM of timestamps are not vectorized reduce-side because they produce a double instead of a long (HIVE-8211)... 
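The plan that follows bears the HIVE-8211 comment out: unlike the min/max/count plan above, it carries no "Execution mode: vectorized" line, and the partial aggregate shuffled to the reducer (_col0) is typed double rather than long, which is what blocks the vectorized reduce path. A hedged illustration of the type widening involved, assuming Hive's documented timestamp-to-double cast (not part of the golden file):

SELECT CAST(ctimestamp1 AS DOUBLE) FROM alltypesorc_string LIMIT 1;
-- a timestamp widens to fractional seconds since the epoch,
-- e.g. 15.007 for 1970-01-01 00:00:15.007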
+EXPLAIN SELECT + sum(ctimestamp1) +FROM alltypesorc_string +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc_string + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctimestamp1 (type: timestamp) + outputColumnNames: ctimestamp1 + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(ctimestamp1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: double) + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + sum(ctimestamp1) +FROM alltypesorc_string +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +POSTHOOK: query: SELECT + sum(ctimestamp1) +FROM alltypesorc_string +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +1123143.8569999998 +PREHOOK: query: EXPLAIN SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: alltypesorc_string + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: ctimestamp1 (type: timestamp) + outputColumnNames: ctimestamp1 + Statistics: Num rows: 40 Data size: 5694 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: avg(ctimestamp1), variance(ctimestamp1), var_pop(ctimestamp1), var_samp(ctimestamp1), std(ctimestamp1), stddev(ctimestamp1), stddev_pop(ctimestamp1), stddev_samp(ctimestamp1) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE + value expressions: _col0 (type: struct), _col1 
(type: struct), _col2 (type: struct), _col3 (type: struct), _col4 (type: struct), _col5 (type: struct), _col6 (type: struct), _col7 (type: struct) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0), variance(VALUE._col1), var_pop(VALUE._col2), var_samp(VALUE._col3), std(VALUE._col4), stddev(VALUE._col5), stddev_pop(VALUE._col6), stddev_samp(VALUE._col7) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: double), _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: double), _col5 (type: double), _col6 (type: double), _col7 (type: double) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 64 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string +PREHOOK: type: QUERY +PREHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +POSTHOOK: query: SELECT + avg(ctimestamp1), + variance(ctimestamp1), + var_pop(ctimestamp1), + var_samp(ctimestamp1), + std(ctimestamp1), + stddev(ctimestamp1), + stddev_pop(ctimestamp1), + stddev_samp(ctimestamp1) +FROM alltypesorc_string +POSTHOOK: type: QUERY +POSTHOOK: Input: default@alltypesorc_string +#### A masked pattern was here #### +2.8798560435897438E13 8.970772952794212E19 8.970772952794212E19 9.206845925236166E19 9.471416447815084E9 9.471416447815084E9 9.471416447815084E9 9.595231068211002E9 diff --git a/ql/src/test/results/compiler/parse/udf6.q.out b/ql/src/test/results/compiler/parse/udf6.q.out index 795216a..4adc7ba 100644 --- a/ql/src/test/results/compiler/parse/udf6.q.out +++ b/ql/src/test/results/compiler/parse/udf6.q.out @@ -1 +1 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION CONCAT 'a' 'b')) (TOK_SELEXPR (TOK_FUNCTION IF TRUE 1 2))))) \ No newline at end of file +(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION CONCAT 'a' 'b')) (TOK_SELEXPR (+ (TOK_FUNCTION IF TRUE 1 2) (TOK_TABLE_OR_COL key)))))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/plan/cast1.q.xml b/ql/src/test/results/compiler/plan/cast1.q.xml index fe34e5b..e135567 100644 --- a/ql/src/test/results/compiler/plan/cast1.q.xml +++ b/ql/src/test/results/compiler/plan/cast1.q.xml @@ -378,282 +378,79 @@ _col6 - - - - - - - - - - true - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFToInteger - - - UDFToInteger - - - + + + 1 + _col5 - - - - - - - - - - 1 - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFToBoolean - - - UDFToBoolean - - - + + + true + _col4 - - - - - - - - - - 3 - - - - - - - - - - - - - - 2.0 - - - - - - 
- - - org.apache.hadoop.hive.ql.udf.UDFToInteger - - - UDFToInteger - - - - - - - - - - - - - - false - - - + + + 5 + _col3 - - - - - - - - - - 3.0 - - - - - - - - - - 2.0 - - - - - - - - - false - - - + + + 5.0 + _col2 - - - - - - - - - - 3 - - - - - - - - - - 2.0 - - - - - - - - - false - - - + + + 5.0 + _col1 - - - - - - - - - - 3.0 - - - - - - - - - - 2 - - - - - - - - - false - - - + + + 5.0 + _col0 - - - - - - - - - - 3 - - - - - - - - - - 2 - - - - - - - - - false - - - + + + 5 + @@ -663,74 +460,25 @@ - - - - - - 5 - - + - - - - - - 5.0 - - + - - - - - - 5.0 - - + - - - - - - 5.0 - - + - - - - - - 5 - - + - - - - - - true - - + - - - - - - 1 - - + diff --git a/ql/src/test/results/compiler/plan/groupby1.q.xml b/ql/src/test/results/compiler/plan/groupby1.q.xml index b895372..4f2e132 100755 --- a/ql/src/test/results/compiler/plan/groupby1.q.xml +++ b/ql/src/test/results/compiler/plan/groupby1.q.xml @@ -732,7 +732,7 @@ value - + value @@ -790,7 +790,7 @@ key - + key @@ -809,30 +809,10 @@ - - - key - - - src - - - - - + - - - value - - - src - - - - - + @@ -1233,7 +1213,7 @@ _col1 - + _col1 @@ -1247,7 +1227,7 @@ _col0 - + _col0 @@ -1266,10 +1246,10 @@ - + - + @@ -1335,7 +1315,7 @@ _col0 - + KEY._col0 @@ -1387,7 +1367,7 @@ - + diff --git a/ql/src/test/results/compiler/plan/groupby2.q.xml b/ql/src/test/results/compiler/plan/groupby2.q.xml index 46a7a5f..fe050e2 100755 --- a/ql/src/test/results/compiler/plan/groupby2.q.xml +++ b/ql/src/test/results/compiler/plan/groupby2.q.xml @@ -819,7 +819,7 @@ value - + value @@ -877,7 +877,7 @@ key - + key @@ -896,30 +896,10 @@ - - - key - - - src - - - - - + - - - value - - - src - - - - - + @@ -1397,7 +1377,7 @@ _col1 - + _col1 @@ -1411,7 +1391,7 @@ _col0 - + _col0 @@ -1430,10 +1410,10 @@ - + - + @@ -1524,7 +1504,7 @@ _col0 - + KEY._col0 @@ -1611,7 +1591,7 @@ - + diff --git a/ql/src/test/results/compiler/plan/groupby3.q.xml b/ql/src/test/results/compiler/plan/groupby3.q.xml index 5b6af21..2246a7b 100644 --- a/ql/src/test/results/compiler/plan/groupby3.q.xml +++ b/ql/src/test/results/compiler/plan/groupby3.q.xml @@ -1065,7 +1065,7 @@ value - + value @@ -1142,17 +1142,7 @@ - - - value - - - src - - - - - + @@ -1606,7 +1596,7 @@ _col4 - + _col4 @@ -1620,7 +1610,7 @@ _col3 - + _col3 @@ -1634,7 +1624,7 @@ _col2 - + _col2 @@ -1648,7 +1638,7 @@ _col1 - + _col1 @@ -1662,7 +1652,7 @@ _col0 - + _col0 @@ -1681,6 +1671,9 @@ + + + @@ -1692,9 +1685,6 @@ - - - diff --git a/ql/src/test/results/compiler/plan/groupby4.q.xml b/ql/src/test/results/compiler/plan/groupby4.q.xml index 92086c3..909f6dd 100644 --- a/ql/src/test/results/compiler/plan/groupby4.q.xml +++ b/ql/src/test/results/compiler/plan/groupby4.q.xml @@ -561,7 +561,7 @@ key - + key @@ -580,17 +580,7 @@ - - - key - - - src - - - - - + @@ -980,7 +970,7 @@ _col0 - + _col0 @@ -999,7 +989,7 @@ - + @@ -1049,7 +1039,7 @@ _col0 - + KEY._col0 @@ -1071,7 +1061,7 @@ - + diff --git a/ql/src/test/results/compiler/plan/groupby5.q.xml b/ql/src/test/results/compiler/plan/groupby5.q.xml index 26e59d9..1106075 100644 --- a/ql/src/test/results/compiler/plan/groupby5.q.xml +++ b/ql/src/test/results/compiler/plan/groupby5.q.xml @@ -584,7 +584,7 @@ value - + value @@ -642,7 +642,7 @@ key - + key @@ -661,30 +661,10 @@ - - - key - - - src - - - - - + - - - value - - - src - - - - - + @@ -1105,7 +1085,7 @@ _col1 - + _col1 @@ -1119,7 +1099,7 @@ _col0 - + _col0 @@ -1138,10 +1118,10 @@ - + - + @@ -1213,7 +1193,7 @@ _col0 - + KEY._col0 @@ -1265,7 +1245,7 @@ - + diff --git a/ql/src/test/results/compiler/plan/groupby6.q.xml 
b/ql/src/test/results/compiler/plan/groupby6.q.xml index 87e83c9..86c00eb 100644 --- a/ql/src/test/results/compiler/plan/groupby6.q.xml +++ b/ql/src/test/results/compiler/plan/groupby6.q.xml @@ -503,7 +503,7 @@ value - + value @@ -580,17 +580,7 @@ - - - value - - - src - - - - - + @@ -980,7 +970,7 @@ _col0 - + _col0 @@ -999,7 +989,7 @@ - + @@ -1049,7 +1039,7 @@ _col0 - + KEY._col0 @@ -1071,7 +1061,7 @@ - + diff --git a/ql/src/test/results/compiler/plan/input1.q.xml b/ql/src/test/results/compiler/plan/input1.q.xml index 239ebbc..8b5d4fc 100755 --- a/ql/src/test/results/compiler/plan/input1.q.xml +++ b/ql/src/test/results/compiler/plan/input1.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/input2.q.xml b/ql/src/test/results/compiler/plan/input2.q.xml index 7f84e15..f106d80 100755 --- a/ql/src/test/results/compiler/plan/input2.q.xml +++ b/ql/src/test/results/compiler/plan/input2.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/input3.q.xml b/ql/src/test/results/compiler/plan/input3.q.xml index 35ad155..0152d46 100755 --- a/ql/src/test/results/compiler/plan/input3.q.xml +++ b/ql/src/test/results/compiler/plan/input3.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A 
masked pattern was here #### - - - - + @@ -881,7 +822,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -979,66 +920,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest2 - - - columns.types - string:string - - - serialization.ddl - struct dest2 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + @@ -1440,7 +1322,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -1547,70 +1429,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest3 - - - columns.types - string:string - - - serialization.ddl - struct dest3 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - partition_columns.types - string:string - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/input6.q.xml b/ql/src/test/results/compiler/plan/input6.q.xml index 185844b..5a459b6 100644 --- a/ql/src/test/results/compiler/plan/input6.q.xml +++ b/ql/src/test/results/compiler/plan/input6.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/input7.q.xml b/ql/src/test/results/compiler/plan/input7.q.xml index a160585..676a72e 100644 --- a/ql/src/test/results/compiler/plan/input7.q.xml +++ b/ql/src/test/results/compiler/plan/input7.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/input9.q.xml b/ql/src/test/results/compiler/plan/input9.q.xml index 0d35af7..3b0c93b 100644 --- a/ql/src/test/results/compiler/plan/input9.q.xml +++ b/ql/src/test/results/compiler/plan/input9.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/join8.q.xml b/ql/src/test/results/compiler/plan/join8.q.xml index 2fd7a37..7ef3d43 100644 --- a/ql/src/test/results/compiler/plan/join8.q.xml +++ b/ql/src/test/results/compiler/plan/join8.q.xml @@ -1713,18 +1713,11 @@ _col2 - - - _col2 - - - - - + _col1 - + _col1 @@ -1735,7 +1728,7 @@ _col0 - + _col0 @@ -1751,15 +1744,15 @@ - - - + + + @@ -1882,7 +1875,14 @@ - + + + _col2 + + + + + @@ -1975,7 +1975,7 @@ _col3 - + VALUE._col0 @@ -1986,7 +1986,7 @@ _col2 - + KEY.reducesinkkey0 @@ -1997,7 +1997,7 @@ _col1 - + VALUE._col0 @@ -2008,7 +2008,7 @@ _col0 - + KEY.reducesinkkey0 @@ -2041,10 +2041,10 @@ 0 - + - + @@ -2052,10 +2052,10 @@ 1 - + - + diff --git a/ql/src/test/results/compiler/plan/sample2.q.xml b/ql/src/test/results/compiler/plan/sample2.q.xml index 9f8e559..1b3a948 100644 --- a/ql/src/test/results/compiler/plan/sample2.q.xml +++ b/ql/src/test/results/compiler/plan/sample2.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/sample3.q.xml b/ql/src/test/results/compiler/plan/sample3.q.xml index ebba8a3..28e3208 100644 --- a/ql/src/test/results/compiler/plan/sample3.q.xml +++ b/ql/src/test/results/compiler/plan/sample3.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/sample4.q.xml b/ql/src/test/results/compiler/plan/sample4.q.xml index 9f8e559..1b3a948 100644 --- a/ql/src/test/results/compiler/plan/sample4.q.xml +++ b/ql/src/test/results/compiler/plan/sample4.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/sample5.q.xml b/ql/src/test/results/compiler/plan/sample5.q.xml index e5eaa8d..3cfd796 100644 --- a/ql/src/test/results/compiler/plan/sample5.q.xml +++ b/ql/src/test/results/compiler/plan/sample5.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/sample6.q.xml b/ql/src/test/results/compiler/plan/sample6.q.xml index 9931f41..a4fee5d 100644 --- a/ql/src/test/results/compiler/plan/sample6.q.xml +++ b/ql/src/test/results/compiler/plan/sample6.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/sample7.q.xml b/ql/src/test/results/compiler/plan/sample7.q.xml index 42eb9cd..a5fbe9d 100644 --- a/ql/src/test/results/compiler/plan/sample7.q.xml +++ b/ql/src/test/results/compiler/plan/sample7.q.xml @@ -334,7 +334,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -432,66 +432,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - + diff --git a/ql/src/test/results/compiler/plan/udf1.q.xml b/ql/src/test/results/compiler/plan/udf1.q.xml index e34f4d1..44988ac 100644 --- a/ql/src/test/results/compiler/plan/udf1.q.xml +++ b/ql/src/test/results/compiler/plan/udf1.q.xml @@ -534,795 +534,189 @@ _col8 - - - - - - - - - - - - - - - - - - - - .* - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFRegExp - - - rlike - - - + + + true + _col7 - - - - - - - - - - ab - - - - - - - - - - a - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFLike - - - like - - - + + + false + _col6 - - - - - - - - - - ab - - - - - - - - - - _a% - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFLike - - - like - - - + + + false + _col5 - - - - - - - - - - ab - - - - - - - - - - \%\_ - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFLike - - - like - - - + + + false + _col4 - - - - - - - - - - %_ - - - - - - - - - - \%\_ - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFLike - - - like - - - + + + true + _col3 - - - - - - - - - - ab - - - - - - - - - - %a_ - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFLike - - - like - - - + + + true + _col2 - - - - - - - - - - ab - - - - - - - - - - %a% - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFLike - - - like - - - + + + true + _col1 - - - - - - - - - - b - - - - - - - - - - %a% - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFLike - - - like - - - + + + false + _col9 - - - - - - - - - - a - - - - - - - - - - [ab] - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFRegExp - - - rlike - - - + + + true + _col13 - - - - - - - - - - abc - - - - - - - - - - b - - - - - - - - - - c - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFRegExpReplace - - - regexp_replace - - - + + + acc + _col12 - - - - - - - - - - hadoop - - - - - - - - - - o* - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFRegExp - - - rlike - - - + + + true + _col11 - - - - - - - - - - hadoop - - - - - - - - - - [a-z]* - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFRegExp - - - rlike - - - + + + true + _col10 - - - - - - - - - - - - - - - - - - - - 
[ab] - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFRegExp - - - rlike - - - + + + false + _col16 - - - - - - - - - - hadoop - - - - - - - - - - (.)[a-z]* - - - - - - - - - - $1ive - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFRegExpReplace - - - regexp_replace - - - + + + hive + _col15 - - - - - - - - - - abbbb - - - - - - - - - - bb - - - - - - - - - - b - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFRegExpReplace - - - regexp_replace - - - + + + abb + _col14 - - - - - - - - - - abc - - - - - - - - - - z - - - - - - - - - - a - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFRegExpReplace - - - regexp_replace - - - + + + abc + _col0 - - - - - - - - - - a - - - - - - - - - - %a% - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFLike - - - like - - - + + + true + @@ -1332,174 +726,55 @@ - - - - - - true - - + - - - - - - false - - + - - - - - - true - - + - - - - - - true - - + - - - - - - true - - + - - - - - - false - - + - - - - - - false - - + - - - - - - false - - + - - - - - - true - - + - - - - - - true - - + - - - - - - false - - + - - - - - - true - - + - - - - - - true - - + - - - - - - acc - - + - - - - - - abc - - + - - - - - - abb - - + - - - - - - hive - - + diff --git a/ql/src/test/results/compiler/plan/udf4.q.xml b/ql/src/test/results/compiler/plan/udf4.q.xml index d6dab9e..4ca78bf 100644 --- a/ql/src/test/results/compiler/plan/udf4.q.xml +++ b/ql/src/test/results/compiler/plan/udf4.q.xml @@ -547,290 +547,94 @@ _col8 - - - - - - - - - - 0.0 - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFSqrt - - - sqrt - - - + + + 0.0 + _col7 - - - - - - - - - - - - - - 1.0 - - - - - - - - - - - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFSqrt - - - sqrt - - - - - - - + _col6 - - - - - - - - - - 1.0 - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFSqrt - - - sqrt - - - + + + 1.0 + _col5 - - - - - - - - - - - - - - 1.5 - - - - - - - - - - - - - - - - - - + + + -2 + _col4 - - - - - - - - - - 1.5 - - - - - - - - + + + 1 + _col3 - - - - - - - - - - 1.0 - - - - - - - - + + + 1 + _col2 - - - - - - - - - - - - - - 1.5 - - - - - - - - - - - - - - - - - - + + + -2.0 + _col1 - - - - - - - - - - 1.5 - - - - - - - - + + + 2.0 + _col9 - - - - - - - - - - 1.0 - - - - - - - - + + + 1 + @@ -867,213 +671,73 @@ _col12 - - - - - - - - - - 1.0 - - - - - - - - + + + 1 + _col11 - - - - - - - - - - - - - - 1.5 - - - - - - - - - - - - - - - - - - + + + -1 + _col10 - - - - - - - - - - 1.5 - - - - - - - - + + + 2 + _col17 - - - - - - - - - - 1 - - - - - - - - - - - - - - 2 - - - - - - - - - - - - - - - - - - - false - - - + + + -1 + _col16 - - - - - - - - - - 1 - - - - - - - - - - 2 - - - - - - - - - false - - - + + + 3 + _col15 - - - - - - - - - - 3 - - - - - - - - + + + -3 + _col14 - + @@ -1084,62 +748,24 @@ _col0 - - - - - - - - - - 1.0 - - - - - - - - + + + 1.0 + _col18 - - - - - - - - - - 1 - - - - - - - - - true - - - org.apache.hadoop.hive.ql.udf.UDFOPBitNot - - - ~ - - - + + + -2 + @@ -1149,173 +775,61 @@ - - - - - - 1.0 - - + - - - - - - 2.0 - - + - - - - - - -2.0 - - + - - - - - - 1 - - + - - - - - - 1 - - + - - - - - - -2 - - + - - - - - - 1.0 - - + - + - - - - - - 0.0 - - + - - - - - - 1 - - + - - - - - - 2 - - + - - - - - - -1 - - + - - - - - - 1 - - + - + - - - - - - -3 - - + - - - - - - 3 - - + - - - - - - -1 - - + - - - - - - -2 - - + diff --git a/ql/src/test/results/compiler/plan/udf6.q.xml b/ql/src/test/results/compiler/plan/udf6.q.xml index 37b969c..24008df 100644 --- a/ql/src/test/results/compiler/plan/udf6.q.xml +++ 
b/ql/src/test/results/compiler/plan/udf6.q.xml @@ -215,7 +215,7 @@ columns.types - string:int + string:double escape.delim @@ -272,12 +272,12 @@ - int + double - int + double @@ -291,47 +291,44 @@ _col1 - + - + - boolean + int - true + 1 - - - + + + key - - 1 + + src - - - - - - - - 2 + - + + + false + + @@ -340,37 +337,13 @@ _col0 - - - - - - - - - - a - - - - - - - - - - b - - - - - - - - + + + ab + @@ -380,24 +353,10 @@ - - - - - - ab - - + - - - - - - 1 - - + @@ -452,7 +411,7 @@ - int + double @@ -468,10 +427,18 @@ src - + + + 0 + + - + + + key + + @@ -482,7 +449,11 @@ TS_0 - + + + key + + @@ -531,7 +502,7 @@ src - + bigint @@ -590,13 +561,13 @@ - + - + - + diff --git a/ql/src/test/results/compiler/plan/udf_case.q.xml b/ql/src/test/results/compiler/plan/udf_case.q.xml index 726f290..dc620cb 100644 --- a/ql/src/test/results/compiler/plan/udf_case.q.xml +++ b/ql/src/test/results/compiler/plan/udf_case.q.xml @@ -346,142 +346,17 @@ _col1 - - - - - - - - - - 11 - - - - - - - - - - 12 - - - - - - - - - - 13 - - - - - - - - - - 14 - - - - - - - - - - 15 - - - - - - - - - - - - + _col0 - - - - - - - - - - 1 - - - - - - - - - - 1 - - - - - - - - - - 2 - - - - - - - - - - 3 - - - - - - - - - - 4 - - - - - - - - - - 5 - - - - - - - - + + + 2 + @@ -491,17 +366,10 @@ - - - - - - 2 - - + - + diff --git a/ql/src/test/results/compiler/plan/udf_when.q.xml b/ql/src/test/results/compiler/plan/udf_when.q.xml index 4179e6b..dc620cb 100644 --- a/ql/src/test/results/compiler/plan/udf_when.q.xml +++ b/ql/src/test/results/compiler/plan/udf_when.q.xml @@ -346,222 +346,17 @@ _col1 - - - - - - - - - - - - - - 12 - - - - - - - - - - 11 - - - - - - - - - - - - boolean - - - - - - - - - - - - 13 - - - - - - - - - - - - - - 14 - - - - - - - - - - 10 - - - - - - - - - - - - - - - - - - - - 15 - - - - - - - - - - - - + _col0 - - - - - - - - - - - - - - 1 - - - - - - - - - - 1 - - - - - - - - - - - - - - - - - - - - 2 - - - - - - - - - - - - - - 3 - - - - - - - - - - 5 - - - - - - - - - - - - - - - - - - - - 4 - - - - - - - - - - 5 - - - - - - - - + + + 2 + @@ -571,17 +366,10 @@ - - - - - - 2 - - + - + @@ -649,7 +437,7 @@ src - + string @@ -669,7 +457,7 @@ src - + string @@ -688,7 +476,7 @@ src - + bigint @@ -711,7 +499,7 @@ src - + string @@ -747,13 +535,13 @@ - + - + diff --git a/ql/src/test/results/compiler/plan/union.q.xml b/ql/src/test/results/compiler/plan/union.q.xml index 2808b05..e09bee6 100644 --- a/ql/src/test/results/compiler/plan/union.q.xml +++ b/ql/src/test/results/compiler/plan/union.q.xml @@ -235,7 +235,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -449,7 +449,7 @@ - + org.apache.hadoop.mapred.TextInputFormat @@ -593,7 +593,7 @@ - + @@ -640,34 +640,7 @@ true - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - string:string - - - - + 1 @@ -1634,7 +1607,7 @@ - + diff --git a/serde/pom.xml b/serde/pom.xml index 9f327f0..b7bc4f0 100644 --- a/serde/pom.xml +++ b/serde/pom.xml @@ -78,6 +78,12 @@ + com.google.code.tempus-fugit + tempus-fugit + ${tempus-fugit.version} + test + + junit junit ${junit.version} diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java index 688b072..29262ba 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java +++ 
b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroDeserializer.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.rmi.server.UID; +import java.sql.Date; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -42,7 +43,10 @@ import org.apache.avro.util.Utf8; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.common.type.HiveChar; import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.common.type.HiveVarchar; +import org.apache.hadoop.hive.serde2.io.DateWritable; import org.apache.hadoop.hive.serde2.objectinspector.StandardUnionObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaHiveDecimalObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; @@ -200,7 +204,6 @@ private Object worker(Object datum, Schema fileSchema, Schema recordSchema, Type return deserializeNullableUnion(datum, fileSchema, recordSchema, columnType); } - switch(columnType.getCategory()) { case STRUCT: return deserializeStruct((GenericData.Record) datum, fileSchema, (StructTypeInfo) columnType); @@ -249,6 +252,42 @@ private Object deserializePrimitive(Object datum, Schema fileSchema, Schema reco JavaHiveDecimalObjectInspector oi = (JavaHiveDecimalObjectInspector) PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector((DecimalTypeInfo)columnType); return oi.set(null, dec); + case CHAR: + if (fileSchema == null) { + throw new AvroSerdeException("File schema is missing for char field. Reader schema is " + columnType); + } + + int maxLength = 0; + try { + maxLength = fileSchema.getJsonProp(AvroSerDe.AVRO_PROP_MAX_LENGTH).getValueAsInt(); + } catch (Exception ex) { + throw new AvroSerdeException("Failed to obtain maxLength value for char field from file schema: " + fileSchema, ex); + } + + String str = datum.toString(); + HiveChar hc = new HiveChar(str, maxLength); + return hc; + case VARCHAR: + if (fileSchema == null) { + throw new AvroSerdeException("File schema is missing for varchar field. 
Reader schema is " + columnType); + } + + maxLength = 0; + try { + maxLength = fileSchema.getJsonProp(AvroSerDe.AVRO_PROP_MAX_LENGTH).getValueAsInt(); + } catch (Exception ex) { + throw new AvroSerdeException("Failed to obtain maxLength value for varchar field from file schema: " + fileSchema, ex); + } + + str = datum.toString(); + HiveVarchar hvc = new HiveVarchar(str, maxLength); + return hvc; + case DATE: + if (recordSchema.getType() != Type.INT) { + throw new AvroSerdeException("Unexpected Avro schema for Date TypeInfo: " + recordSchema.getType()); + } + + return new Date(DateWritable.daysToMillis((Integer)datum)); default: return datum; } diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java index 69545b0..b93121d 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java @@ -41,9 +41,15 @@ private static final Log LOG = LogFactory.getLog(AvroSerDe.class); public static final String DECIMAL_TYPE_NAME = "decimal"; + public static final String CHAR_TYPE_NAME = "char"; + public static final String VARCHAR_TYPE_NAME = "varchar"; + public static final String DATE_TYPE_NAME = "date"; public static final String AVRO_PROP_LOGICAL_TYPE = "logicalType"; public static final String AVRO_PROP_PRECISION = "precision"; public static final String AVRO_PROP_SCALE = "scale"; + public static final String AVRO_PROP_MAX_LENGTH = "maxLength"; + public static final String AVRO_STRING_TYPE_NAME = "string"; + public static final String AVRO_INT_TYPE_NAME = "int"; private ObjectInspector oi; private List columnNames; diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerializer.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerializer.java index 2bd48ca..c8eac89 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerializer.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerializer.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.serde2.avro; +import java.sql.Date; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -30,7 +31,10 @@ import org.apache.avro.generic.GenericEnumSymbol; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.common.type.HiveChar; import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.common.type.HiveVarchar; +import org.apache.hadoop.hive.serde2.io.DateWritable; import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -38,6 +42,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.UnionObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.DateObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo; @@ -193,6 +198,15 @@ private Object serializePrimitive(TypeInfo typeInfo, PrimitiveObjectInspector fi case DECIMAL: HiveDecimal dec = (HiveDecimal)fieldOI.getPrimitiveJavaObject(structFieldData); return AvroSerdeUtils.getBufferFromDecimal(dec, 
((DecimalTypeInfo)typeInfo).scale()); + case CHAR: + HiveChar ch = (HiveChar)fieldOI.getPrimitiveJavaObject(structFieldData); + return ch.getStrippedValue(); + case VARCHAR: + HiveVarchar vc = (HiveVarchar)fieldOI.getPrimitiveJavaObject(structFieldData); + return vc.getValue(); + case DATE: + Date date = ((DateObjectInspector)fieldOI).getPrimitiveJavaObject(structFieldData); + return DateWritable.dateToDays(date); case UNKNOWN: throw new AvroSerdeException("Received UNKNOWN primitive category."); case VOID: diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/SchemaToTypeInfo.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/SchemaToTypeInfo.java index 23e024f..c84b1a0 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/SchemaToTypeInfo.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/SchemaToTypeInfo.java @@ -128,6 +128,33 @@ public static TypeInfo generateTypeInfo(Schema schema) throws AvroSerdeException return TypeInfoFactory.getDecimalTypeInfo(precision, scale); } + if (type == Schema.Type.STRING && + AvroSerDe.CHAR_TYPE_NAME.equalsIgnoreCase(schema.getProp(AvroSerDe.AVRO_PROP_LOGICAL_TYPE))) { + int maxLength = 0; + try { + maxLength = schema.getJsonProp(AvroSerDe.AVRO_PROP_MAX_LENGTH).getValueAsInt(); + } catch (Exception ex) { + throw new AvroSerdeException("Failed to obtain maxLength value from file schema: " + schema, ex); + } + return TypeInfoFactory.getCharTypeInfo(maxLength); + } + + if (type == Schema.Type.STRING && + AvroSerDe.VARCHAR_TYPE_NAME.equalsIgnoreCase(schema.getProp(AvroSerDe.AVRO_PROP_LOGICAL_TYPE))) { + int maxLength = 0; + try { + maxLength = schema.getJsonProp(AvroSerDe.AVRO_PROP_MAX_LENGTH).getValueAsInt(); + } catch (Exception ex) { + throw new AvroSerdeException("Failed to obtain maxLength value from file schema: " + schema, ex); + } + return TypeInfoFactory.getVarcharTypeInfo(maxLength); + } + + if (type == Schema.Type.INT && + AvroSerDe.DATE_TYPE_NAME.equals(schema.getProp(AvroSerDe.AVRO_PROP_LOGICAL_TYPE))) { + return TypeInfoFactory.dateTypeInfo; + } + return typeInfoCache.retrieve(schema); } diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/TypeInfoToSchema.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/TypeInfoToSchema.java index 4169558..8cb2dc3 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/TypeInfoToSchema.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/TypeInfoToSchema.java @@ -19,6 +19,7 @@ import org.apache.avro.Schema; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo; @@ -26,6 +27,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.UnionTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; import org.codehaus.jackson.JsonNode; import org.codehaus.jackson.node.JsonNodeFactory; @@ -105,10 +107,16 @@ private Schema createAvroPrimitive(TypeInfo typeInfo) { schema = Schema.create(Schema.Type.STRING); break; case CHAR: - schema = Schema.create(Schema.Type.STRING); + schema = AvroSerdeUtils.getSchemaFor("{" + + "\"type\":\"" + AvroSerDe.AVRO_STRING_TYPE_NAME + "\"," + + "\"logicalType\":\"" + AvroSerDe.CHAR_TYPE_NAME + "\"," + + "\"maxLength\":" + 
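// For a Hive char(5) column (the example used in avro-struct.avsc and
// TestTypeInfoToSchema), this concatenation yields the schema
// {"type":"string","logicalType":"char","maxLength":5}, the same shape that
// SchemaToTypeInfo and AvroDeserializer read back through
// AVRO_PROP_LOGICAL_TYPE and AVRO_PROP_MAX_LENGTH.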
((CharTypeInfo) typeInfo).getLength() + "}"); break; case VARCHAR: - schema = Schema.create(Schema.Type.STRING); + schema = AvroSerdeUtils.getSchemaFor("{" + + "\"type\":\"" + AvroSerDe.AVRO_STRING_TYPE_NAME + "\"," + + "\"logicalType\":\"" + AvroSerDe.VARCHAR_TYPE_NAME + "\"," + + "\"maxLength\":" + ((VarcharTypeInfo) typeInfo).getLength() + "}"); break; case BINARY: schema = Schema.create(Schema.Type.BYTES); @@ -144,6 +152,11 @@ private Schema createAvroPrimitive(TypeInfo typeInfo) { "\"precision\":" + precision + "," + "\"scale\":" + scale + "}"); break; + case DATE: + schema = AvroSerdeUtils.getSchemaFor("{" + + "\"type\":\"" + AvroSerDe.AVRO_INT_TYPE_NAME + "\"," + + "\"logicalType\":\"" + AvroSerDe.DATE_TYPE_NAME + "\"}"); + break; case VOID: schema = Schema.create(Schema.Type.NULL); break; diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveCharObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveCharObjectInspector.java index d16e313..2baceed 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveCharObjectInspector.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveCharObjectInspector.java @@ -21,6 +21,11 @@ import org.apache.hadoop.hive.serde2.io.HiveCharWritable; import org.apache.hadoop.hive.serde2.typeinfo.BaseCharUtils; import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; +import org.apache.hadoop.io.Text; + +import java.nio.ByteBuffer; +import java.nio.CharBuffer; +import java.nio.charset.Charset; public class WritableHiveCharObjectInspector extends AbstractPrimitiveWritableObjectInspector implements SettableHiveCharObjectInspector { @@ -39,6 +44,12 @@ public HiveChar getPrimitiveJavaObject(Object o) { if (o == null) { return null; } + + if (o instanceof Text) { + String str = ((Text)o).toString(); + return new HiveChar(str, ((CharTypeInfo)typeInfo).getLength()); + } + HiveCharWritable writable = ((HiveCharWritable) o); if (doesWritableMatchTypeParams(writable)) { return writable.getHiveChar(); @@ -53,6 +64,14 @@ public HiveCharWritable getPrimitiveWritableObject(Object o) { if (o == null) { return null; } + + if (o instanceof Text) { + String str = ((Text)o).toString(); + HiveCharWritable hcw = new HiveCharWritable(); + hcw.set(str, ((CharTypeInfo)typeInfo).getLength()); + return hcw; + } + HiveCharWritable writable = ((HiveCharWritable) o); if (doesWritableMatchTypeParams((HiveCharWritable) o)) { return writable; diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveVarcharObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveVarcharObjectInspector.java index 28c9080..e723878 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveVarcharObjectInspector.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableHiveVarcharObjectInspector.java @@ -19,10 +19,15 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.common.type.HiveChar; import org.apache.hadoop.hive.common.type.HiveVarchar; +import org.apache.hadoop.hive.serde2.io.HiveCharWritable; import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable; +import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; import 
org.apache.hadoop.hive.serde2.typeinfo.BaseCharUtils; +import org.apache.hadoop.io.Text; +import org.apache.hive.common.util.HiveStringUtils; public class WritableHiveVarcharObjectInspector extends AbstractPrimitiveWritableObjectInspector implements SettableHiveVarcharObjectInspector { @@ -43,6 +48,12 @@ public HiveVarchar getPrimitiveJavaObject(Object o) { if (o == null) { return null; } + + if (o instanceof Text) { + String str = ((Text)o).toString(); + return new HiveVarchar(str, ((VarcharTypeInfo)typeInfo).getLength()); + } + HiveVarcharWritable writable = ((HiveVarcharWritable)o); if (doesWritableMatchTypeParams(writable)) { return writable.getHiveVarchar(); @@ -57,6 +68,14 @@ public HiveVarcharWritable getPrimitiveWritableObject(Object o) { if (o == null) { return null; } + + if (o instanceof Text) { + String str = ((Text)o).toString(); + HiveVarcharWritable hcw = new HiveVarcharWritable(); + hcw.set(str, ((VarcharTypeInfo)typeInfo).getLength()); + return hcw; + } + HiveVarcharWritable writable = ((HiveVarcharWritable)o); if (doesWritableMatchTypeParams((HiveVarcharWritable)o)) { return writable; diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/HiveDecimalUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/HiveDecimalUtils.java index cc75491..aa9e37a 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/HiveDecimalUtils.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/HiveDecimalUtils.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; public class HiveDecimalUtils { @@ -134,4 +135,25 @@ public static int getScaleForType(PrimitiveTypeInfo typeInfo) { } } + public static TypeInfo getDecimalTypeForPrimitiveCategories( + PrimitiveTypeInfo a, PrimitiveTypeInfo b) { + int prec1 = HiveDecimalUtils.getPrecisionForType(a); + int prec2 = HiveDecimalUtils.getPrecisionForType(b); + int scale1 = HiveDecimalUtils.getScaleForType(a); + int scale2 = HiveDecimalUtils.getScaleForType(b); + int intPart = Math.max(prec1 - scale1, prec2 - scale2); + int decPart = Math.max(scale1, scale2); + int prec = Math.min(intPart + decPart, HiveDecimal.MAX_PRECISION); + int scale = Math.min(decPart, HiveDecimal.MAX_PRECISION - intPart); + return TypeInfoFactory.getDecimalTypeInfo(prec, scale); + } + + public static DecimalTypeInfo getDecimalTypeForPrimitiveCategory(PrimitiveTypeInfo a) { + if (a instanceof DecimalTypeInfo) return (DecimalTypeInfo)a; + int prec = HiveDecimalUtils.getPrecisionForType(a); + int scale = HiveDecimalUtils.getScaleForType(a); + prec = Math.min(prec, HiveDecimal.MAX_PRECISION); + scale = Math.min(scale, HiveDecimal.MAX_PRECISION - (prec - scale)); + return TypeInfoFactory.getDecimalTypeInfo(prec, scale); + } } diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestTypeInfoToSchema.java b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestTypeInfoToSchema.java index 0f53e31..c6b5cb6 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestTypeInfoToSchema.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestTypeInfoToSchema.java @@ -19,9 +19,11 @@ package org.apache.hadoop.hive.serde2.avro; import com.google.common.io.Resources; + import org.junit.Assert; import org.apache.avro.Schema; import org.apache.commons.io.IOUtils; +import org.apache.hadoop.hive.serde.serdeConstants; import 
org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; @@ -44,16 +46,28 @@ private static Logger LOGGER = Logger.getLogger(TestTypeInfoToSchema.class); private static final List COLUMN_NAMES = Arrays.asList("testCol"); - private static final TypeInfo STRING = TypeInfoFactory.getPrimitiveTypeInfo("string"); - private static final TypeInfo INT = TypeInfoFactory.getPrimitiveTypeInfo("int"); - private static final TypeInfo BOOLEAN = TypeInfoFactory.getPrimitiveTypeInfo("boolean"); - private static final TypeInfo LONG = TypeInfoFactory.getPrimitiveTypeInfo("bigint"); - private static final TypeInfo FLOAT = TypeInfoFactory.getPrimitiveTypeInfo("float"); - private static final TypeInfo DOUBLE = TypeInfoFactory.getPrimitiveTypeInfo("double"); - private static final TypeInfo BINARY = TypeInfoFactory.getPrimitiveTypeInfo("binary"); - private static final TypeInfo BYTE = TypeInfoFactory.getPrimitiveTypeInfo("tinyint"); - private static final TypeInfo SHORT = TypeInfoFactory.getPrimitiveTypeInfo("smallint"); - private static final TypeInfo VOID = TypeInfoFactory.getPrimitiveTypeInfo("void"); + private static final TypeInfo STRING = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.STRING_TYPE_NAME); + private static final TypeInfo INT = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.INT_TYPE_NAME); + private static final TypeInfo BOOLEAN = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.BOOLEAN_TYPE_NAME); + private static final TypeInfo LONG = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.BIGINT_TYPE_NAME); + private static final TypeInfo FLOAT = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.FLOAT_TYPE_NAME); + private static final TypeInfo DOUBLE = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.DOUBLE_TYPE_NAME); + private static final TypeInfo BINARY = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.BINARY_TYPE_NAME); + private static final TypeInfo BYTE = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.TINYINT_TYPE_NAME); + private static final TypeInfo SHORT = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.SMALLINT_TYPE_NAME); + private static final TypeInfo VOID = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.VOID_TYPE_NAME); + private static final TypeInfo DATE = TypeInfoFactory.getPrimitiveTypeInfo( + serdeConstants.DATE_TYPE_NAME); private static final int PRECISION = 4; private static final int SCALE = 2; private static final TypeInfo DECIMAL = TypeInfoFactory.getPrimitiveTypeInfo( @@ -205,6 +219,41 @@ public void createAvroDecimalSchema() { } @Test + public void createAvroCharSchema() { + final String specificSchema = "{" + + "\"type\":\"string\"," + + "\"logicalType\":\"char\"," + + "\"maxLength\":" + CHAR_LEN + "}"; + String expectedSchema = genSchema(specificSchema); + + Assert.assertEquals("Test for char's avro schema failed", + expectedSchema, getAvroSchemaString(CHAR)); + } + + @Test + public void createAvroVarcharSchema() { + final String specificSchema = "{" + + "\"type\":\"string\"," + + "\"logicalType\":\"varchar\"," + + "\"maxLength\":" + CHAR_LEN + "}"; + String expectedSchema = genSchema(specificSchema); + + Assert.assertEquals("Test for varchar's avro schema failed", + expectedSchema, getAvroSchemaString(VARCHAR)); + } + + @Test + public void createAvroDateSchema() { + final String specificSchema = "{" + + "\"type\":\"int\"," + + "\"logicalType\":\"date\"}"; + String expectedSchema 
= genSchema(specificSchema); + + Assert.assertEquals("Test for date in avro schema failed", + expectedSchema, getAvroSchemaString(DATE)); + } + + @Test public void createAvroListSchema() { ListTypeInfo listTypeInfo = new ListTypeInfo(); listTypeInfo.setListElementTypeInfo(STRING); @@ -313,6 +362,7 @@ public void createAvroStructSchema() throws IOException { names.add("field11"); names.add("field12"); names.add("field13"); + names.add("field14"); structTypeInfo.setAllStructFieldNames(names); ArrayList typeInfos = new ArrayList(); typeInfos.add(STRING); @@ -327,6 +377,7 @@ public void createAvroStructSchema() throws IOException { typeInfos.add(DOUBLE); typeInfos.add(BOOLEAN); typeInfos.add(DECIMAL); + typeInfos.add(DATE); typeInfos.add(VOID); structTypeInfo.setAllStructFieldTypeInfos(typeInfos); LOGGER.info("structTypeInfo is " + structTypeInfo); diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestDateWritable.java b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestDateWritable.java new file mode 100644 index 0000000..75de0a6 --- /dev/null +++ b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestDateWritable.java @@ -0,0 +1,138 @@ +package org.apache.hadoop.hive.serde2.io; + +import com.google.code.tempusfugit.concurrency.annotations.*; +import com.google.code.tempusfugit.concurrency.*; +import org.junit.*; + +import static org.junit.Assert.*; +import java.io.*; +import java.sql.Date; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.Calendar; + +public class TestDateWritable { + + @Rule public ConcurrentRule concurrentRule = new ConcurrentRule(); + @Rule public RepeatingRule repeatingRule = new RepeatingRule(); + + @Test + @Concurrent(count=4) + @Repeating(repetition=100) + public void testConstructor() { + Date date = Date.valueOf(getRandomDateString()); + DateWritable dw1 = new DateWritable(date); + DateWritable dw2 = new DateWritable(dw1); + DateWritable dw3 = new DateWritable(dw1.getDays()); + + assertEquals(dw1, dw1); + assertEquals(dw1, dw2); + assertEquals(dw2, dw3); + assertEquals(date, dw1.get()); + assertEquals(date, dw2.get()); + assertEquals(date, dw3.get()); + } + + @Test + @Concurrent(count=4) + @Repeating(repetition=100) + public void testComparison() { + // Get 2 different dates + Date date1 = Date.valueOf(getRandomDateString()); + Date date2 = Date.valueOf(getRandomDateString()); + while (date1.equals(date2)) { + date2 = Date.valueOf(getRandomDateString()); + } + + DateWritable dw1 = new DateWritable(date1); + DateWritable dw2 = new DateWritable(date2); + DateWritable dw3 = new DateWritable(date1); + + assertTrue("Dates should be equal", dw1.equals(dw1)); + assertTrue("Dates should be equal", dw1.equals(dw3)); + assertTrue("Dates should be equal", dw3.equals(dw1)); + assertEquals("Dates should be equal", 0, dw1.compareTo(dw1)); + assertEquals("Dates should be equal", 0, dw1.compareTo(dw3)); + assertEquals("Dates should be equal", 0, dw3.compareTo(dw1)); + + assertFalse("Dates not should be equal", dw1.equals(dw2)); + assertFalse("Dates not should be equal", dw2.equals(dw1)); + assertTrue("Dates not should be equal", 0 != dw1.compareTo(dw2)); + assertTrue("Dates not should be equal", 0 != dw2.compareTo(dw1)); + } + + @Test + @Concurrent(count=4) + @Repeating(repetition=100) + public void testGettersSetters() { + Date date1 = Date.valueOf(getRandomDateString()); + Date date2 = Date.valueOf(getRandomDateString()); + Date date3 = Date.valueOf(getRandomDateString()); + DateWritable dw1 = new DateWritable(date1); + DateWritable 
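// DateWritable represents a date as the number of days since the UNIX epoch
// (1970-01-01), which is why the assertions below expect getDays() of 1 for
// 1970-01-02 and 365 for 1971-01-01, and why equal dates compare equal no
// matter how the writable was constructed.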
dw2 = new DateWritable(date2); + DateWritable dw3 = new DateWritable(date3); + DateWritable dw4 = new DateWritable(); + + // Getters + assertEquals(date1, dw1.get()); + assertEquals(date1.getTime() / 1000, dw1.getTimeInSeconds()); + + dw4.set(Date.valueOf("1970-01-02")); + assertEquals(1, dw4.getDays()); + dw4.set(Date.valueOf("1971-01-01")); + assertEquals(365, dw4.getDays()); + + // Setters + dw4.set(dw1.getDays()); + assertEquals(dw1, dw4); + + dw4.set(dw2.get()); + assertEquals(dw2, dw4); + + dw4.set(dw3); + assertEquals(dw3, dw4); + } + + @Test + @Concurrent(count=4) + @Repeating(repetition=100) + public void testWritableMethods() throws Throwable { + DateWritable dw1 = new DateWritable(Date.valueOf(getRandomDateString())); + DateWritable dw2 = new DateWritable(); + ByteArrayOutputStream byteStream = new ByteArrayOutputStream(); + DataOutput out = new DataOutputStream(byteStream); + + dw1.write(out); + dw2.readFields(new DataInputStream(new ByteArrayInputStream(byteStream.toByteArray()))); + + assertEquals("Dates should be equal", dw1, dw2); + } + + @Test + @Concurrent(count=4) + @Repeating(repetition=100) + public void testDateValueOf() { + // Just making sure Date.valueOf() works ok + String dateStr = getRandomDateString(); + Date date = Date.valueOf(dateStr); + assertEquals(dateStr, date.toString()); + } + + private static String[] dateStrings = new String[365]; + + @BeforeClass + public static void setupDateStrings() { + DateFormat format = new SimpleDateFormat("yyyy-MM-dd"); + Date initialDate = Date.valueOf("2014-01-01"); + Calendar cal = Calendar.getInstance(); + cal.setTime(initialDate); + for (int idx = 0; idx < 365; ++idx) { + dateStrings[idx] = format.format(cal.getTime()); + cal.add(1, Calendar.DAY_OF_YEAR); + } + } + + private static String getRandomDateString() { + return dateStrings[(int) (Math.random() * 365)]; + } +} diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveCharWritable.java b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveCharWritable.java index c8bb311..ee4292d 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveCharWritable.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveCharWritable.java @@ -18,10 +18,20 @@ package org.apache.hadoop.hive.serde2.io; -import junit.framework.TestCase; +import com.google.code.tempusfugit.concurrency.annotations.*; +import com.google.code.tempusfugit.concurrency.*; +import org.junit.*; + +import static org.junit.Assert.*; import org.apache.hadoop.hive.common.type.HiveChar; -public class TestHiveCharWritable extends TestCase { +public class TestHiveCharWritable { + @Rule public ConcurrentRule concurrentRule = new ConcurrentRule(); + @Rule public RepeatingRule repeatingRule = new RepeatingRule(); + + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testConstructor() throws Exception { HiveCharWritable hcw1 = new HiveCharWritable(new HiveChar("abc", 5)); assertEquals("abc ", hcw1.toString()); @@ -30,6 +40,9 @@ public void testConstructor() throws Exception { assertEquals("abc ", hcw2.toString()); } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testSet() throws Exception { HiveCharWritable hcw1 = new HiveCharWritable(); @@ -70,18 +83,27 @@ public void testSet() throws Exception { assertEquals("ab", hcw1.getTextValue().toString()); } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testGetHiveChar() throws Exception { HiveCharWritable hcw = new HiveCharWritable(); hcw.set("abcd", 10); 
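// getHiveChar() applies char(10) semantics: the stored value is padded with
// trailing spaces out to the declared maximum length, so "abcd" reads back
// as "abcd" followed by six spaces.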
assertEquals("abcd ", hcw.getHiveChar().toString()); } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testGetCharacterLength() throws Exception { HiveCharWritable hcw = new HiveCharWritable(); hcw.set("abcd", 10); assertEquals(4, hcw.getCharacterLength()); } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testEnforceMaxLength() { HiveCharWritable hcw1 = new HiveCharWritable(); hcw1.set("abcdefghij", 10); @@ -92,6 +114,9 @@ public void testEnforceMaxLength() { assertEquals("abcde", hcw1.toString()); } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testComparison() throws Exception { HiveCharWritable hcw1 = new HiveCharWritable(); HiveCharWritable hcw2 = new HiveCharWritable(); diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveDecimalWritable.java b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveDecimalWritable.java index 849646b..3b12514 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveDecimalWritable.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveDecimalWritable.java @@ -18,7 +18,10 @@ package org.apache.hadoop.hive.serde2.io; -import junit.framework.Assert; +import com.google.code.tempusfugit.concurrency.annotations.*; +import com.google.code.tempusfugit.concurrency.*; +import org.junit.*; +import static org.junit.Assert.*; import java.math.BigDecimal; import java.math.BigInteger; @@ -29,8 +32,6 @@ import org.apache.hadoop.hive.common.type.Decimal128; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hive.common.util.Decimal128FastBuffer; -import org.junit.Before; -import org.junit.Test; /** * Unit tests for tsting the fast allocation-free conversion @@ -38,14 +39,15 @@ */ public class TestHiveDecimalWritable { - private Decimal128FastBuffer scratch; + @Rule public ConcurrentRule concurrentRule = new ConcurrentRule(); + @Rule public RepeatingRule repeatingRule = new RepeatingRule(); @Before public void setUp() throws Exception { - scratch = new Decimal128FastBuffer(); } private void doTestFastStreamForHiveDecimal(String valueString) { + Decimal128FastBuffer scratch = new Decimal128FastBuffer(); BigDecimal value = new BigDecimal(valueString); Decimal128 dec = new Decimal128(); dec.update(value); @@ -61,21 +63,23 @@ private void doTestFastStreamForHiveDecimal(String valueString) { BigDecimal readValue = hd.bigDecimalValue(); - Assert.assertEquals(value, readValue); + assertEquals(value, readValue); // Now test fastUpdate from the same serialized HiveDecimal Decimal128 decRead = new Decimal128().fastUpdateFromInternalStorage( witness.getInternalStorage(), (short) witness.getScale()); - Assert.assertEquals(dec, decRead); + assertEquals(dec, decRead); // Test fastUpdate from it's own (not fully compacted) serialized output Decimal128 decReadSelf = new Decimal128().fastUpdateFromInternalStorage( hdw.getInternalStorage(), (short) hdw.getScale()); - Assert.assertEquals(dec, decReadSelf); + assertEquals(dec, decReadSelf); } @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testFastStreamForHiveDecimal() { doTestFastStreamForHiveDecimal("0"); @@ -217,7 +221,10 @@ void doTestDecimalWithBoundsCheck(Decimal128 value) { } @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testHive6594() { + Decimal128FastBuffer scratch = new Decimal128FastBuffer(); String[] vs = new String[] { "-4033.445769230769", "6984454.211097692"}; @@ -236,7 +243,7 @@ public void testHive6594() { BigDecimal readValue = 
hd.bigDecimalValue(); - Assert.assertEquals(d.toBigDecimal().stripTrailingZeros(), + assertEquals(d.toBigDecimal().stripTrailingZeros(), readValue.stripTrailingZeros()); } } diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveVarcharWritable.java b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveVarcharWritable.java index 74e7228..32d0da7 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveVarcharWritable.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestHiveVarcharWritable.java @@ -17,11 +17,21 @@ */ package org.apache.hadoop.hive.serde2.io; -import junit.framework.TestCase; +import com.google.code.tempusfugit.concurrency.annotations.*; +import com.google.code.tempusfugit.concurrency.*; +import org.junit.*; +import static org.junit.Assert.*; + import org.apache.hadoop.hive.common.type.HiveVarchar; import java.io.*; -public class TestHiveVarcharWritable extends TestCase { +public class TestHiveVarcharWritable { + @Rule public ConcurrentRule concurrentRule = new ConcurrentRule(); + @Rule public RepeatingRule repeatingRule = new RepeatingRule(); + + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testStringLength() throws Exception { HiveVarcharWritable vc1 = new HiveVarcharWritable(new HiveVarchar("0123456789", 10)); assertEquals(10, vc1.getCharacterLength()); @@ -54,6 +64,9 @@ public void testStringLength() throws Exception { assertEquals(6, vc1.getCharacterLength()); } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testEnforceLength() throws Exception { HiveVarcharWritable vc1 = new HiveVarcharWritable(new HiveVarchar("0123456789", 10)); assertEquals(10, vc1.getCharacterLength()); @@ -66,8 +79,11 @@ public void testEnforceLength() throws Exception { vc1.enforceMaxLength(8); assertEquals(8, vc1.getCharacterLength()); -} + } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testComparison() throws Exception { HiveVarcharWritable hc1 = new HiveVarcharWritable(new HiveVarchar("abcd", 20)); HiveVarcharWritable hc2 = new HiveVarcharWritable(new HiveVarchar("abcd", 20)); @@ -101,6 +117,9 @@ public void testComparison() throws Exception { assertFalse(0 == hc2.compareTo(hc1)); } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testStringValue() throws Exception { HiveVarcharWritable vc1 = new HiveVarcharWritable(new HiveVarchar("abcde", 20)); assertEquals("abcde", vc1.toString()); diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestTimestampWritable.java b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestTimestampWritable.java index 0e7b418..5fee019 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestTimestampWritable.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestTimestampWritable.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.hive.serde2.io; +import com.google.code.tempusfugit.concurrency.annotations.*; +import com.google.code.tempusfugit.concurrency.*; + import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.DataInputStream; @@ -32,15 +35,25 @@ import java.util.Random; import java.util.TimeZone; -import junit.framework.TestCase; +import org.junit.*; +import static org.junit.Assert.*; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableUtils; -public class TestTimestampWritable extends TestCase { +public class TestTimestampWritable { + + @Rule public ConcurrentRule concurrentRule = new 
ConcurrentRule(); + @Rule public RepeatingRule repeatingRule = new RepeatingRule(); - private static DateFormat DATE_FORMAT = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + private static ThreadLocal DATE_FORMAT = + new ThreadLocal() { + @Override + protected synchronized DateFormat initialValue() { + return new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"); + } + }; private static final int HAS_DECIMAL_MASK = 0x80000000; @@ -64,14 +77,14 @@ private static long getSeconds(Timestamp ts) { private static long parseToMillis(String s) { try { - return DATE_FORMAT.parse(s).getTime(); + return DATE_FORMAT.get().parse(s).getTime(); } catch (ParseException ex) { throw new RuntimeException(ex); } } - @Override - protected void setUp() { + @Before + public void setUp() { TimeZone.setDefault(TimeZone.getTimeZone("UTC")); } @@ -252,6 +265,9 @@ private static TimestampWritable fromIntAndVInts(int i, long... vints) throws IO return tsw; } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testReverseNanos() { assertEquals(0, reverseNanos(0)); assertEquals(120000000, reverseNanos(21)); @@ -265,6 +281,8 @@ public void testReverseNanos() { * Test serializing and deserializing timestamps that can be represented by a number of seconds * from 0 to 2147483647 since the UNIX epoch. */ + @Test + @Concurrent(count=4) public void testTimestampsWithinPositiveIntRange() throws IOException { Random rand = new Random(294722773L); for (int i = 0; i < 10000; ++i) { @@ -281,6 +299,8 @@ private static long randomMillis(long minMillis, long maxMillis, Random rand) { * Test timestamps that don't necessarily fit between 1970 and 2038. This depends on HIVE-4525 * being fixed. */ + @Test + @Concurrent(count=4) public void testTimestampsOutsidePositiveIntRange() throws IOException { Random rand = new Random(789149717L); for (int i = 0; i < 10000; ++i) { @@ -289,6 +309,8 @@ public void testTimestampsOutsidePositiveIntRange() throws IOException { } } + @Test + @Concurrent(count=4) public void testTimestampsInFullRange() throws IOException { Random rand = new Random(2904974913L); for (int i = 0; i < 10000; ++i) { @@ -296,6 +318,8 @@ public void testTimestampsInFullRange() throws IOException { } } + @Test + @Concurrent(count=4) public void testToFromDouble() { Random rand = new Random(294729777L); for (int nanosPrecision = 0; nanosPrecision <= 4; ++nanosPrecision) { @@ -326,6 +350,8 @@ private static HiveDecimal timestampToDecimal(Timestamp ts) { return HiveDecimal.create(d); } + @Test + @Concurrent(count=4) public void testDecimalToTimestampRandomly() { Random rand = new Random(294729777L); for (int i = 0; i < 10000; ++i) { @@ -336,6 +362,9 @@ public void testDecimalToTimestampRandomly() { } } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testDecimalToTimestampCornerCases() { Timestamp ts = new Timestamp(parseToMillis("1969-03-04 05:44:33")); assertEquals(0, ts.getTime() % 1000); @@ -347,6 +376,9 @@ public void testDecimalToTimestampCornerCases() { } } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testSerializationFormatDirectly() throws IOException { assertEquals("1970-01-01 00:00:00", fromIntAndVInts(0).toString()); assertEquals("1970-01-01 00:00:01", fromIntAndVInts(1).toString()); @@ -374,6 +406,9 @@ public void testSerializationFormatDirectly() throws IOException { -3210 - 1, seconds >> 31).toString()); } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testMaxSize() { // This many bytes are necessary to store the reversed nanoseconds. 
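// Hadoop's WritableUtils variable-length encoding stores small values in a
// single byte but needs up to 5 bytes for a full 32-bit value such as
// 999999999, which is where the 4 + 5 + 4 = 13 byte maximum for a serialized
// timestamp (noted below) comes from.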
assertEquals(5, WritableUtils.getVIntSize(999999999)); @@ -396,6 +431,9 @@ public void testMaxSize() { // Therefore, the maximum total size of a serialized timestamp is 4 + 5 + 4 = 13. } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testMillisToSeconds() { assertEquals(0, TimestampWritable.millisToSeconds(0)); assertEquals(-1, TimestampWritable.millisToSeconds(-1)); @@ -427,6 +465,9 @@ private static int normalizeComparisonResult(int result) { return result < 0 ? -1 : (result > 0 ? 1 : 0); } + @Test + @Concurrent(count=4) + @Repeating(repetition=100) public void testBinarySortable() { Random rand = new Random(5972977L); List tswList = new ArrayList(); diff --git a/serde/src/test/resources/avro-struct.avsc b/serde/src/test/resources/avro-struct.avsc index c8c83d7..7bfcde9 100644 --- a/serde/src/test/resources/avro-struct.avsc +++ b/serde/src/test/resources/avro-struct.avsc @@ -4,11 +4,11 @@ "namespace":"", "doc":"struct", +field12:decimal(4,2),field13:date,field14:void>", "fields":[ {"name":"field1","type":["null","string"],"doc":"string","default":null}, -{"name":"field2","type":["null","string"],"doc":"char(5)","default":null}, -{"name":"field3","type":["null","string"],"doc":"varchar(5)","default":null}, +{"name":"field2","type":["null",{"type":"string","logicalType":"char","maxLength":5}],"doc":"char(5)","default":null}, +{"name":"field3","type":["null",{"type":"string","logicalType":"varchar","maxLength":5}],"doc":"varchar(5)","default":null}, {"name":"field4","type":["null","bytes"],"doc":"binary","default":null}, {"name":"field5","type":["null","int"],"doc":"tinyint","default":null}, {"name":"field6","type":["null","int"],"doc":"smallint","default":null}, @@ -17,8 +17,8 @@ field12:decimal(4,2),field13:void>", {"name":"field9","type":["null","float"],"doc":"float","default":null}, {"name":"field10","type":["null","double"],"doc":"double","default":null}, {"name":"field11","type":["null","boolean"],"doc":"boolean","default":null}, -{"name":"field12","type":["null",{"type":"bytes","logicalType":"decimal","precision":4, -"scale":2}],"doc":"decimal(4,2)","default":null}, -{"name":"field13","type":"null","doc":"void","default":null} +{"name":"field12","type":["null",{"type":"bytes","logicalType":"decimal","precision":4,"scale":2}],"doc":"decimal(4,2)","default":null}, +{"name":"field13","type":["null",{"type":"int","logicalType":"date"}],"doc":"date","default":null}, +{"name":"field14","type":"null","doc":"void","default":null} ] -} \ No newline at end of file +} diff --git a/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java b/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java index a0f7667..83dd2e6 100644 --- a/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java +++ b/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java @@ -23,6 +23,7 @@ import java.net.UnknownHostException; import java.util.HashMap; import java.util.Map; + import javax.security.auth.login.LoginException; import javax.security.sasl.Sasl; @@ -31,6 +32,7 @@ import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hive.service.cli.HiveSQLException; import org.apache.hive.service.cli.thrift.ThriftCLIService; import org.apache.thrift.TProcessorFactory; @@ -136,15 +138,17 @@ public TTransportFactory getAuthTransFactory() throws LoginException { 
return transportFactory; } + /** + * Returns the thrift processor factory for HiveServer2 running in binary mode + * @param service + * @return + * @throws LoginException + */ public TProcessorFactory getAuthProcFactory(ThriftCLIService service) throws LoginException { - if ("http".equalsIgnoreCase(transportMode)) { - return HttpAuthUtils.getAuthProcFactory(service); + if (authTypeStr.equalsIgnoreCase(AuthTypes.KERBEROS.getAuthName())) { + return KerberosSaslHelper.getKerberosProcessorFactory(saslServer, service); } else { - if (authTypeStr.equalsIgnoreCase(AuthTypes.KERBEROS.getAuthName())) { - return KerberosSaslHelper.getKerberosProcessorFactory(saslServer, service); - } else { - return PlainSaslHelper.getPlainProcessorFactory(service); - } + return PlainSaslHelper.getPlainProcessorFactory(service); } } @@ -287,7 +291,9 @@ public static void verifyProxyAccess(String realUser, String proxyUser, String i try { UserGroupInformation sessionUgi; if (ShimLoader.getHadoopShims().isSecurityEnabled()) { - sessionUgi = ShimLoader.getHadoopShims().createProxyUser(realUser); + KerberosName kerbName = new KerberosName(realUser); + String shortPrincipalName = kerbName.getServiceName(); + sessionUgi = ShimLoader.getHadoopShims().createProxyUser(shortPrincipalName); } else { sessionUgi = ShimLoader.getHadoopShims().createRemoteUser(realUser, null); } @@ -300,5 +306,5 @@ public static void verifyProxyAccess(String realUser, String proxyUser, String i "Failed to validate proxy privilege of " + realUser + " for " + proxyUser, e); } } - + } diff --git a/service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java b/service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java index 82093fa..10b6c79 100644 --- a/service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java +++ b/service/src/java/org/apache/hive/service/auth/HttpAuthUtils.java @@ -22,17 +22,10 @@ import java.security.PrivilegedExceptionAction; import org.apache.commons.codec.binary.Base64; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hive.service.cli.thrift.TCLIService; -import org.apache.hive.service.cli.thrift.TCLIService.Iface; -import org.apache.hive.service.cli.thrift.ThriftCLIService; import org.apache.http.protocol.BasicHttpContext; import org.apache.http.protocol.HttpContext; -import org.apache.thrift.TProcessor; -import org.apache.thrift.TProcessorFactory; -import org.apache.thrift.transport.TTransport; import org.ietf.jgss.GSSContext; import org.ietf.jgss.GSSCredential; import org.ietf.jgss.GSSManager; @@ -48,11 +41,7 @@ public static final String AUTHORIZATION = "Authorization"; public static final String BASIC = "Basic"; public static final String NEGOTIATE = "Negotiate"; - - public static TProcessorFactory getAuthProcFactory(ThriftCLIService service) { - return new HttpCLIServiceProcessorFactory(service); - } - + /** * @return Stringified Base64 encoded kerberosAuthHeader on success */ @@ -62,7 +51,7 @@ public static String getKerberosServiceTicket(String principal, String host, Str String serverPrincipal = getServerPrincipal(principal, host); // Uses the Ticket Granting Ticket in the UserGroupInformation return clientUGI.doAs( - new HttpKerberosClientAction(serverPrincipal, clientUGI.getShortUserName(), serverHttpUrl)); + new HttpKerberosClientAction(serverPrincipal, clientUGI.getUserName(), serverHttpUrl)); } /** @@ -87,26 +76,6 @@ private HttpAuthUtils() { throw new 
UnsupportedOperationException("Can't initialize class"); } - public static class HttpCLIServiceProcessorFactory extends TProcessorFactory { - - private final ThriftCLIService service; - private final HiveConf hiveConf; - private final boolean isDoAsEnabled; - - public HttpCLIServiceProcessorFactory(ThriftCLIService service) { - super(null); - this.service = service; - hiveConf = service.getHiveConf(); - isDoAsEnabled = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS); - } - - @Override - public TProcessor getProcessor(TTransport trans) { - TProcessor baseProcessor = new TCLIService.Processor(service); - return isDoAsEnabled ? new HttpCLIServiceUGIProcessor(baseProcessor) : baseProcessor; - } - } - public static class HttpKerberosClientAction implements PrivilegedExceptionAction { public static final String HTTP_RESPONSE = "HTTP_RESPONSE"; diff --git a/service/src/java/org/apache/hive/service/auth/HttpCLIServiceUGIProcessor.java b/service/src/java/org/apache/hive/service/auth/HttpCLIServiceUGIProcessor.java index 245d793..e69de29 100644 --- a/service/src/java/org/apache/hive/service/auth/HttpCLIServiceUGIProcessor.java +++ b/service/src/java/org/apache/hive/service/auth/HttpCLIServiceUGIProcessor.java @@ -1,79 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hive.service.auth; - -import java.io.IOException; -import java.security.PrivilegedExceptionAction; -import java.util.ArrayList; - -import org.apache.hadoop.hive.shims.HadoopShims; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hive.service.cli.session.SessionManager; -import org.apache.thrift.TException; -import org.apache.thrift.TProcessor; -import org.apache.thrift.protocol.TProtocol; - -/** - * Wraps the underlying Thrift processor's process call, - * to assume the client user's UGI/Subject for the doAs calls. - * Gets the client's username from a ThreadLocal in SessionManager which is - * set in the ThriftHttpServlet, and constructs a client UGI object from that. - */ -public class HttpCLIServiceUGIProcessor implements TProcessor { - - private final TProcessor underlyingProcessor; - private final HadoopShims shim; - - public HttpCLIServiceUGIProcessor(TProcessor underlyingProcessor) { - this.underlyingProcessor = underlyingProcessor; - shim = ShimLoader.getHadoopShims(); - } - - @Override - public boolean process(final TProtocol in, final TProtocol out) throws TException { - /* - * Build the client UGI from ThreadLocal username [SessionManager.getUserName()]. - * The ThreadLocal username is set in the ThriftHttpServlet. 
- */ - try { - UserGroupInformation clientUgi = - shim.createRemoteUser(SessionManager.getUserName(), new ArrayList()); - return shim.doAs(clientUgi, new PrivilegedExceptionAction() { - @Override - public Boolean run() { - try { - return underlyingProcessor.process(in, out); - } catch (TException te) { - throw new RuntimeException(te); - } - } - }); - } catch (RuntimeException rte) { - if (rte.getCause() instanceof TException) { - throw (TException) rte.getCause(); - } - throw rte; - } catch (InterruptedException ie) { - throw new RuntimeException(ie); // unexpected! - } catch (IOException ioe) { - throw new RuntimeException(ioe); // unexpected! - } - } -} diff --git a/service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java b/service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java index 19722f2..645e3e2 100644 --- a/service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java +++ b/service/src/java/org/apache/hive/service/auth/TSetIpAddressProcessor.java @@ -18,7 +18,6 @@ package org.apache.hive.service.auth; -import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Processor; import org.apache.hive.service.cli.thrift.TCLIService; import org.apache.hive.service.cli.thrift.TCLIService.Iface; import org.apache.thrift.TException; @@ -43,7 +42,7 @@ */ public class TSetIpAddressProcessor extends TCLIService.Processor { - private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName()); + private static final Logger LOGGER = LoggerFactory.getLogger(TSetIpAddressProcessor.class.getName()); public TSetIpAddressProcessor(Iface iface) { super(iface); @@ -75,7 +74,7 @@ protected void setIpAddress(final TProtocol in) { if (tSocket == null) { LOGGER.warn("Unknown Transport, cannot determine ipAddress"); } else { - THREAD_LOCAL_IP_ADDRESS.set(tSocket.getSocket().getInetAddress().toString()); + THREAD_LOCAL_IP_ADDRESS.set(tSocket.getSocket().getInetAddress().getHostAddress()); } } diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java index 5231d5e..f021870 100644 --- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java +++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java @@ -166,15 +166,20 @@ private void processGlobalInitFile() { IHiveFileProcessor processor = new GlobalHivercFileProcessor(); try { - if (hiveConf.getVar(ConfVars.HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION) != null) { - String hiverc = hiveConf.getVar(ConfVars.HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION) - + File.separator + SessionManager.HIVERCFILE; - if (new File(hiverc).exists()) { - LOG.info("Running global init file: " + hiverc); - int rc = processor.processFile(hiverc); + String hiverc = hiveConf.getVar(ConfVars.HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION); + if (hiverc != null) { + File hivercFile = new File(hiverc); + if (hivercFile.isDirectory()) { + hivercFile = new File(hivercFile, SessionManager.HIVERCFILE); + } + if (hivercFile.isFile()) { + LOG.info("Running global init file: " + hivercFile); + int rc = processor.processFile(hivercFile.getAbsolutePath()); if (rc != 0) { - LOG.warn("Failed on initializing global .hiverc file"); + LOG.error("Failed to initialize global .hiverc file"); } + } else { + LOG.debug("Global init file " + hivercFile + " does not exist"); } } } catch (IOException e) { 
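
Side note on the HiveSessionImpl change above: HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION may now point either directly at an init file or at a directory containing one. A minimal standalone sketch of the resolution rule, assuming SessionManager.HIVERCFILE is the ".hiverc" file name (the class name and constant value here are illustrative only, not part of the patch):

    import java.io.File;

    public class GlobalHivercResolver {
      // Assumed to mirror SessionManager.HIVERCFILE
      static final String HIVERCFILE = ".hiverc";

      /** Returns the init file to run, or null if none exists at the location. */
      static File resolve(String location) {
        if (location == null) {
          return null;
        }
        File candidate = new File(location);
        if (candidate.isDirectory()) {
          // Old behavior, preserved: a directory is searched for <dir>/.hiverc
          candidate = new File(candidate, HIVERCFILE);
        }
        // New behavior: a plain file path is used as-is
        return candidate.isFile() ? candidate : null;
      }
    }
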
diff --git a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java index 4654acc..ecc9b96 100644 --- a/service/src/java/org/apache/hive/service/cli/session/SessionManager.java +++ b/service/src/java/org/apache/hive/service/cli/session/SessionManager.java @@ -229,6 +229,23 @@ public SessionHandle openSession(TProtocolVersion protocol, String username, Str return openSession(protocol, username, password, ipAddress, sessionConf, false, null); } + /** + * Opens a new session and creates a session handle. + * The username passed to this method is the effective username. + * If withImpersonation is true (i.e. doAs is enabled), all calls on the HiveSession + * are wrapped in a UGI.doAs, where the UGI corresponds to the effective user. + * @see org.apache.hive.service.cli.thrift.ThriftCLIService#getUserName() + * + * @param protocol the client's Thrift protocol version + * @param username the effective username + * @param password the password supplied by the client + * @param ipAddress the client's IP address + * @param sessionConf the session configuration overlay supplied by the client + * @param withImpersonation whether to run the session as the effective user + * @param delegationToken the delegation token to use for impersonation, if any + * @return the SessionHandle of the newly created session + * @throws HiveSQLException if the session cannot be created + */ public SessionHandle openSession(TProtocolVersion protocol, String username, String password, String ipAddress, Map sessionConf, boolean withImpersonation, String delegationToken) throws HiveSQLException { diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java index c4b273c..4a1e004 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java @@ -262,6 +262,16 @@ private String getIpAddress() { return clientIpAddress; } + /** + * Returns the effective username. + * 1. If hive.server2.allow.user.substitution = false: the username of the connecting user + * 2. If hive.server2.allow.user.substitution = true: the username of the end user + * that the connecting user is trying to proxy for. + * This includes a check of whether the connecting user is allowed to proxy for the end user. + * @param req the TOpenSessionReq received from the client + * @return the effective username + * @throws HiveSQLException if the proxy-privilege check fails + */ private String getUserName(TOpenSessionReq req) throws HiveSQLException { String userName = null; // Kerberos 
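
To make the user-substitution rule in this javadoc concrete: the body of getUserName() is not shown in this hunk, so the following is only a hedged sketch of the described behavior. It reuses the verifyProxyAccess() helper from HiveAuthFactory shown earlier in this patch, whose trailing parameters are truncated in the hunk header and assumed here to be the client IP address and the HiveConf; the "hive.server2.proxy.user" key and the method/class names are likewise assumptions for illustration:

    import java.util.Map;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hive.service.auth.HiveAuthFactory;
    import org.apache.hive.service.cli.HiveSQLException;

    public class EffectiveUserSketch {
      static String resolveEffectiveUser(String connectingUser, Map<String, String> sessionConf,
          String ipAddress, HiveConf conf, boolean allowSubstitution) throws HiveSQLException {
        String proxyUser = sessionConf == null ? null : sessionConf.get("hive.server2.proxy.user");
        if (!allowSubstitution || proxyUser == null) {
          return connectingUser;  // case 1: no substitution allowed or requested
        }
        // case 2: verify the connecting user may act for the end user, then substitute
        HiveAuthFactory.verifyProxyAccess(connectingUser, proxyUser, ipAddress, conf);
        return proxyUser;
      }
    }
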
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java index 795115e..cfa7284 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java @@ -31,6 +31,7 @@ import org.apache.hive.service.auth.HiveAuthFactory; import org.apache.hive.service.auth.HiveAuthFactory.AuthTypes; import org.apache.hive.service.cli.CLIService; +import org.apache.hive.service.cli.thrift.TCLIService.Iface; import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup; import org.apache.thrift.TProcessor; import org.apache.thrift.TProcessorFactory; @@ -102,8 +103,7 @@ public void run() { // Thrift configs hiveAuthFactory = new HiveAuthFactory(hiveConf); - TProcessorFactory processorFactory = hiveAuthFactory.getAuthProcFactory(this); - TProcessor processor = processorFactory.getProcessor(null); + TProcessor processor = new TCLIService.Processor(this); TProtocolFactory protocolFactory = new TBinaryProtocol.Factory(); // Set during the init phase of HiveServer2 if auth mode is kerberos // UGI for the hive/_HOST (kerberos) principal diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java index 99ef8bc..312d05e 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java @@ -32,6 +32,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hive.service.auth.AuthenticationProviderFactory; import org.apache.hive.service.auth.AuthenticationProviderFactory.AuthMethods; import org.apache.hive.service.auth.HiveAuthFactory; @@ -219,7 +220,7 @@ public String run() throws HttpAuthenticationException { "provided by the client."); } else { - return getPrincipalWithoutRealm(gssContext.getSrcName().toString()); + return getPrincipalWithoutRealmAndHost(gssContext.getSrcName().toString()); } } catch (GSSException e) { @@ -237,8 +238,19 @@ public String run() throws HttpAuthenticationException { } private String getPrincipalWithoutRealm(String fullPrincipal) { - String names[] = fullPrincipal.split("[@]"); - return names[0]; + KerberosName fullKerberosName = new KerberosName(fullPrincipal); + String serviceName = fullKerberosName.getServiceName(); + String hostName = fullKerberosName.getHostName(); + String principalWithoutRealm = serviceName; + if (hostName != null) { + principalWithoutRealm = serviceName + "/" + hostName; + } + return principalWithoutRealm; + } + + private String getPrincipalWithoutRealmAndHost(String fullPrincipal) { + KerberosName fullKerberosName = new KerberosName(fullPrincipal); + return fullKerberosName.getServiceName(); } } 
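
For reference, this is how KerberosName splits a principal for the getPrincipalWithoutRealm / getPrincipalWithoutRealmAndHost helpers above (a small illustrative snippet, not part of the patch; the example principals are made up):

    import org.apache.hadoop.security.authentication.util.KerberosName;

    public class KerberosNameDemo {
      public static void main(String[] args) {
        // Three-part service principal: service/host@REALM
        KerberosName svc = new KerberosName("hive/hs2.example.com@EXAMPLE.COM");
        System.out.println(svc.getServiceName()); // hive
        System.out.println(svc.getHostName());    // hs2.example.com
        System.out.println(svc.getRealm());       // EXAMPLE.COM

        // Two-part user principal: user@REALM (no host component)
        KerberosName user = new KerberosName("alice@EXAMPLE.COM");
        System.out.println(user.getServiceName()); // alice
        System.out.println(user.getHostName());    // null
      }
    }

So for a two-part user principal both helpers return just the user name, while for a service principal only getPrincipalWithoutRealmAndHost() also drops the host part.
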
diff --git a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java index 7530d59..b4d517f 100644 --- a/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java +++ b/service/src/test/org/apache/hive/service/cli/CLIServiceTest.java @@ -150,7 +150,7 @@ public void testExecuteStatement() throws Exception { client.closeOperation(opHandle); // Blocking execute - queryString = "SELECT ID FROM TEST_EXEC"; + queryString = "SELECT ID+1 FROM TEST_EXEC"; opHandle = client.executeStatement(sessionHandle, queryString, confOverlay); // Expect query to be completed now assertEquals("Query should be finished", @@ -225,27 +225,27 @@ public void testExecuteStatementAsync() throws Exception { /** * Execute an async query with default config */ - queryString = "SELECT ID FROM " + tableName; + queryString = "SELECT ID+1 FROM " + tableName; runQueryAsync(sessionHandle, queryString, confOverlay, OperationState.FINISHED, longPollingTimeout); /** * Execute an async query with long polling timeout set to 0 */ longPollingTimeout = 0; - queryString = "SELECT ID FROM " + tableName; + queryString = "SELECT ID+1 FROM " + tableName; runQueryAsync(sessionHandle, queryString, confOverlay, OperationState.FINISHED, longPollingTimeout); /** * Execute an async query with long polling timeout set to 500 millis */ longPollingTimeout = 500; - queryString = "SELECT ID FROM " + tableName; + queryString = "SELECT ID+1 FROM " + tableName; runQueryAsync(sessionHandle, queryString, confOverlay, OperationState.FINISHED, longPollingTimeout); /** * Cancellation test */ - queryString = "SELECT ID FROM " + tableName; + queryString = "SELECT ID+1 FROM " + tableName; opHandle = client.executeStatementAsync(sessionHandle, queryString, confOverlay); System.out.println("Cancelling " + opHandle); client.cancelOperation(opHandle); diff --git a/service/src/test/org/apache/hive/service/cli/session/TestSessionGlobalInitFile.java b/service/src/test/org/apache/hive/service/cli/session/TestSessionGlobalInitFile.java index 5b1cbc0..47d3a56 100644 --- a/service/src/test/org/apache/hive/service/cli/session/TestSessionGlobalInitFile.java +++ b/service/src/test/org/apache/hive/service/cli/session/TestSessionGlobalInitFile.java @@ -44,6 +44,7 @@ private ThriftCLIServiceClient client; private File initFile; private String tmpDir; + private HiveConf hiveConf; /** * This class is almost the same as EmbeddedThriftBinaryCLIService, @@ -86,7 +87,7 @@ public void setUp() throws Exception { FileUtils.writeLines(initFile, Arrays.asList(fileContent)); // set up service and client - HiveConf hiveConf = new HiveConf(); + hiveConf = new HiveConf(); hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION, initFile.getParentFile().getAbsolutePath()); service = new FakeEmbeddedThriftBinaryCLIService(hiveConf); @@ -102,11 +103,26 @@ public void tearDown() throws Exception { @Test public void testSessionGlobalInitFile() throws Exception { - /** - * create session, and fetch the property set in global init file. Test if - * the global init file .hiverc is loaded correctly by checking the expected - * setting property. - */ + File tmpInitFile = new File(initFile.getParent(), "hiverc"); + Assert.assertTrue("Failed to rename " + initFile + " to " + tmpInitFile, + initFile.renameTo(tmpInitFile)); + initFile = tmpInitFile; + hiveConf.setVar(HiveConf.ConfVars.HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION, + initFile.getAbsolutePath()); + doTestSessionGlobalInitFile(); + } + + @Test + public void testSessionGlobalInitDir() throws Exception { + doTestSessionGlobalInitFile(); + } + + /** + * Create a session and fetch the property set in the global init file. Test + * whether the global init file .hiverc is loaded correctly by checking the + * expected property value. + */ + private void doTestSessionGlobalInitFile() throws Exception { SessionHandle sessionHandle = client.openSession(null, null, null); verifyInitProperty("a", "1", sessionHandle); 
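
On the CLIServiceTest changes above: switching the queries from SELECT ID to SELECT ID+1 presumably defeats Hive's simple-fetch path, which can serve a bare column projection without launching a job; the expression forces real compilation and an async-executable task, which the polling and cancellation tests need. A rough sketch of the kind of polling loop a helper like runQueryAsync() relies on, assuming the getOperationStatus() client call of this era (the poll interval and helper name are arbitrary):

    // Hedged sketch: poll an async operation until it leaves the in-progress states.
    private void waitForCompletion(CLIServiceClient client, OperationHandle opHandle)
        throws Exception {
      OperationState state;
      do {
        Thread.sleep(100);  // arbitrary poll interval
        state = client.getOperationStatus(opHandle).getState();
      } while (state == OperationState.INITIALIZED || state == OperationState.PENDING
          || state == OperationState.RUNNING);
      Assert.assertEquals("Query should be finished", OperationState.FINISHED, state);
    }
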
diff --git a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java index 58420d9..630cfc9 100644 --- a/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java +++ b/service/src/test/org/apache/hive/service/cli/thrift/ThriftCLIServiceTest.java @@ -177,7 +177,7 @@ public void testExecuteStatement() throws Exception { client.executeStatement(sessHandle, queryString, opConf); // Execute another query - queryString = "SELECT ID FROM TEST_EXEC_THRIFT"; + queryString = "SELECT ID+1 FROM TEST_EXEC_THRIFT"; OperationHandle opHandle = client.executeStatement(sessHandle, queryString, opConf); assertNotNull(opHandle); @@ -227,7 +227,7 @@ public void testExecuteStatementAsync() throws Exception { client.executeStatement(sessHandle, queryString, opConf); // Execute another query - queryString = "SELECT ID FROM TEST_EXEC_ASYNC_THRIFT"; + queryString = "SELECT ID+1 FROM TEST_EXEC_ASYNC_THRIFT"; System.out.println("Will attempt to execute: " + queryString); opHandle = client.executeStatementAsync(sessHandle, queryString, opConf); diff --git a/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java b/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java index 9592bc4..a353a46 100644 --- a/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java +++ b/shims/0.20/src/main/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java @@ -918,4 +918,14 @@ public boolean supportStickyBit() { public boolean hasStickyBit(FsPermission permission) { return false; // not supported } + + @Override + public boolean supportTrashFeature() { + return false; + } + + @Override + public Path getCurrentTrashPath(Configuration conf, FileSystem fs) { + return null; + } } diff --git a/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java b/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java index bbb3f4e..030cb75 100644 --- a/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java +++ b/shims/0.20S/src/main/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java @@ -534,6 +534,16 @@ public boolean supportStickyBit() { @Override public boolean hasStickyBit(FsPermission permission) { - return false; // not supported + return false; + } + + @Override + public boolean supportTrashFeature() { + return false; + } + + @Override + public Path getCurrentTrashPath(Configuration conf, FileSystem fs) { + return null; } } diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java index afeb073..0731108 100644 --- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java +++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java @@ -46,6 +46,7 @@ import org.apache.hadoop.fs.ProxyFileSystem; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.Trash; +import org.apache.hadoop.fs.TrashPolicy; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; import org.apache.hadoop.fs.permission.AclEntryType; @@ -835,4 +836,15 @@ public 
boolean supportStickyBit() { public boolean hasStickyBit(FsPermission permission) { return permission.getStickyBit(); } + + @Override + public boolean supportTrashFeature() { + return true; + } + + @Override + public Path getCurrentTrashPath(Configuration conf, FileSystem fs) { + TrashPolicy tp = TrashPolicy.getInstance(conf, fs, fs.getHomeDirectory()); + return tp.getCurrentTrashDir(); + } } diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java index 282437c..4fcaa1e 100644 --- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java +++ b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HadoopShims.java @@ -721,4 +721,14 @@ public void checkFileAccess(FileSystem fs, FileStatus status, FsAction action) * @return sticky bit */ boolean hasStickyBit(FsPermission permission); + + /** + * @return true if the current Hadoop version supports the trash feature. + */ + boolean supportTrashFeature(); + + /** + * @return the path to the current HDFS trash directory, if the current Hadoop version + * supports the trash feature; null otherwise. + */ + Path getCurrentTrashPath(Configuration conf, FileSystem fs); }
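
A usage sketch for the new shim methods, using only the API introduced above (how production callers actually consume these methods is outside this diff):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.shims.HadoopShims;
    import org.apache.hadoop.hive.shims.ShimLoader;

    public class TrashShimDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        HadoopShims shims = ShimLoader.getHadoopShims();

        // Callers must gate on supportTrashFeature(): the 0.20 and 0.20S shims
        // return null from getCurrentTrashPath().
        if (shims.supportTrashFeature()) {
          Path trash = shims.getCurrentTrashPath(conf, fs);
          System.out.println("Current trash dir: " + trash);
        } else {
          System.out.println("This Hadoop version does not support the trash feature");
        }
      }
    }
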