diff --git a/LICENSE b/LICENSE
index 2885945..4349934 100644
--- a/LICENSE
+++ b/LICENSE
@@ -207,57 +207,7 @@ APACHE HIVE SUBCOMPONENTS:
 
 The Apache Hive project contains subcomponents with separate copyright
 notices and license terms. Your use of the source code for the these
 subcomponents is subject to the terms and conditions of the following
-licenses.
-
-For the org.apache.hadoop.hive.ql.util.jdbm.* classes:
-
-/**
- * JDBM LICENSE v1.00
- *
- * Redistribution and use of this software and associated documentation
- * ("Software"), with or without modification, are permitted provided
- * that the following conditions are met:
- *
- * 1. Redistributions of source code must retain copyright
- * statements and notices. Redistributions must also contain a
- * copy of this document.
- *
- * 2. Redistributions in binary form must reproduce the
- * above copyright notice, this list of conditions and the
- * following disclaimer in the documentation and/or other
- * materials provided with the distribution.
- *
- * 3. The name "JDBM" must not be used to endorse or promote
- * products derived from this Software without prior written
- * permission of Cees de Groot. For written permission,
- * please contact cg@cdegroot.com.
- *
- * 4. Products derived from this Software may not be called "JDBM"
- * nor may "JDBM" appear in their names without prior written
- * permission of Cees de Groot.
- *
- * 5. Due credit should be given to the JDBM Project
- * (http://jdbm.sourceforge.net/).
- *
- * THIS SOFTWARE IS PROVIDED BY THE JDBM PROJECT AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
- * NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
- * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
- * CEES DE GROOT OR ANY CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
- * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
- * OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Copyright 2000 (C) Cees de Groot. All Rights Reserved.
- * Copyright 2000-2001 (C) Alex Boisvert. All Rights Reserved.
- * Contributions are Copyright (C) 2000 by their associated contributors.
- *
- * $Id: RecordManager.java,v 1.3 2005/06/25 23:12:31 doomdark Exp $
- */
+licenses.
 
 For the ANTLR libraries:
diff --git a/NOTICE b/NOTICE
index 636472e..5d08ed7 100644
--- a/NOTICE
+++ b/NOTICE
@@ -1,9 +1,9 @@
 Apache Hive
-Copyright 2008-2013 The Apache Software Foundation
+Copyright 2008-2014 The Apache Software Foundation
 
 This product includes software developed by The
 Apache Software Foundation (http://www.apache.org/).
 
 This product includes Jersey (https://jersey.java.net/)
-Copyright (c) 2010-2013 Oracle and/or its affiliates.
+Copyright (c) 2010-2014 Oracle and/or its affiliates.
 
diff --git a/RELEASE_NOTES.txt b/RELEASE_NOTES.txt
index e8c0728..ec6598d 100644
--- a/RELEASE_NOTES.txt
+++ b/RELEASE_NOTES.txt
@@ -1,3 +1,1120 @@
+Release Notes - Hive - Version 0.14.0
+
+** Sub-task
+ * [HIVE-4629] - HS2 should support an API to retrieve query logs
+ * [HIVE-5176] - Wincompat : Changes for allowing various path compatibilities with Windows
+ * [HIVE-5179] - Wincompat : change script tests from bash to sh
+ * [HIVE-5579] - Update statistics rules for different types of joins
+ * [HIVE-5760] - Add vectorized support for CHAR/VARCHAR data types
+ * [HIVE-5804] - Support column statistics for expressions in GBY attributes, JOIN condition etc. when annotating operator tree with statistics
+ * [HIVE-5805] - Support for operators like PTF, Script, Extract etc. in statistics annotation.
+ * [HIVE-5868] - Add statistics rule for Union operator
+ * [HIVE-5949] - In statistics annotation add flag to say if statistics is estimated or accurate
+ * [HIVE-5998] - Add vectorized reader for Parquet files
+ * [HIVE-6031] - explain subquery rewrite for where clause predicates
+ * [HIVE-6123] - Implement checkstyle in maven
+ * [HIVE-6252] - sql std auth - support 'with admin option' in revoke role metastore api
+ * [HIVE-6290] - Add support for hbase filters for composite keys
+ * [HIVE-6367] - Implement Decimal in ParquetSerde
+ * [HIVE-6394] - Implement Timestmap in ParquetSerde
+ * [HIVE-6445] - Add qop support for kerberos over http in HiveServer2
+ * [HIVE-6626] - Hive does not expand the DOWNLOADED_RESOURCES_DIR path
+ * [HIVE-6627] - HiveServer2 should handle scratch dir permissions / errors in a better way
+ * [HIVE-6714] - Fix getMapSize() of LazyMap
+ * [HIVE-6735] - Make scalable dynamic partitioning work in vectorized mode
+ * [HIVE-6760] - Scalable dynamic partitioning should bail out properly for list bucketing
+ * [HIVE-6761] - Hashcode computation does not use maximum parallelism for scalable dynamic partitioning
+ * [HIVE-6798] - Update column stats based on filter expression in stats annotation
+ * [HIVE-6815] - Version of the HIVE-6374 for Hive 0.13
+ * [HIVE-6982] - Export all .sh equivalent for windows (.cmd files) in bin, bin/ext
+ * [HIVE-6993] - Update hive for Tez VertexLocationHint and getAVailableResource API changes
+ * [HIVE-7029] - Vectorize ReduceWork
+ * [HIVE-7078] - Need file sink operators that work with ACID
+ * [HIVE-7094] - Separate out static/dynamic partitioning code in FileRecordWriterContainer
+ * [HIVE-7156] - Group-By operator stat-annotation only uses distinct approx to generate rollups
+ * [HIVE-7184] - TestHadoop20SAuthBridge no longer compiles after HADOOP-10448
+ * [HIVE-7204] - Use NULL vertex location hint for Prewarm DAG vertices
+ * [HIVE-7262] - Partitioned Table Function (PTF) query fails on ORC table when attempting to vectorize
+ * [HIVE-7286] - Parameterize HCatMapReduceTest for testing against all Hive storage formats
+ * [HIVE-7291] - Refactor TestParser to understand test-property file
+ * [HIVE-7350] - Changes related to TEZ-692, TEZ-1169, TEZ-1234
+ * [HIVE-7357] - Add vectorized support for BINARY data type
+ * [HIVE-7398] - Parent GBY of MUX is removed even it's not for semijoin
+ * [HIVE-7404] - Revoke privilege should support revoking of grant option
+ * [HIVE-7405] - Vectorize GROUP BY on the Reduce-Side (Part 1 – Basic)
+ * [HIVE-7420] - Parameterize tests for HCatalog Pig interfaces for testing against all storage formats
+ * [HIVE-7427] - Changes for EdgeConfigurations
+ * [HIVE-7457] - Minor HCatalog Pig Adapter test
clean up + * [HIVE-7491] - Stats annotation fails to evaluate constant expressions in filter operator + * [HIVE-7513] - Add ROW__ID VirtualColumn + * [HIVE-7535] - Make use of number of nulls column statistics in filter rule + * [HIVE-7536] - Make use of decimal column statistics in statistics annotation + * [HIVE-7544] - Changes related to TEZ-1288 (FastTezSerialization) + * [HIVE-7548] - Precondition checks should not fail the merge task in case of automatic trigger + * [HIVE-7571] - RecordUpdater should read virtual columns from row + * [HIVE-7589] - Some fixes and improvements to statistics annotation rules + * [HIVE-7601] - Bring up tez-branch upto the API changes from TEZ-1058, TEZ-1303, TEZ-1346, TEZ-1041 + * [HIVE-7639] - Bring tez-branch upto api changes in TEZ-1379, TEZ-1057, TEZ-1382 + * [HIVE-7646] - Modify parser to support new grammar for Insert,Update,Delete + * [HIVE-7655] - CBO: Reading of partitioned table stats slows down explain + * [HIVE-7656] - Bring tez-branch up-to the API changes made by TEZ-1372 + * [HIVE-7663] - OrcRecordUpdater needs to implement getStats + * [HIVE-7679] - JOIN operator should update the column stats when number of rows changes + * [HIVE-7734] - Join stats annotation rule is not updating columns statistics correctly + * [HIVE-7735] - Implement Char, Varchar in ParquetSerDe + * [HIVE-7788] - Generate plans for insert, update, and delete + * [HIVE-7790] - Update privileges to check for update and delete + * [HIVE-7808] - Changes to work against Tez-0.5 RC + * [HIVE-7809] - Fix ObjectRegistry to work with Tez 0.5 + * [HIVE-7820] - union_null.q is not deterministic + * [HIVE-7825] - Bring tez-branch up-to the API changes made by TEZ-1472, TEZ-1469 + * [HIVE-7836] - Ease-out denominator for multi-attribute join case in statistics annotation + * [HIVE-7864] - [CBO] Query fails if it refers only partitioning column + * [HIVE-7869] - Build long running HS2 test framework + * [HIVE-7904] - Missing null check cause NPE when updating join column stats in statistics annotation + * [HIVE-7905] - CBO: more cost model changes + * [HIVE-7907] - Bring up tez branch to changes in TEZ-1038, TEZ-1500 + * [HIVE-7935] - Support dynamic service discovery for HiveServer2 + * [HIVE-7979] - Fix testconfiguration.property file in Tez branch + * [HIVE-7990] - With fetch column stats disabled number of elements in grouping set is not taken into account + * [HIVE-7991] - Incorrect calculation of number of rows in JoinStatsRule.process results in overflow + * [HIVE-7992] - StatsRulesProcFactory should gracefully handle overflows + * [HIVE-7994] - BMJ test fails on tez + * [HIVE-7995] - Column statistics from expression does not handle fields within complex types + * [HIVE-8006] - CBO Trunk Merge: Test fail that includes Table Sample, rows(), query hints + * [HIVE-8016] - CBO: PPD to honor hive Join Cond, Casting fixes, Add annotations for IF, Code cleanup + * [HIVE-8021] - CBO: support CTAS and insert ... 
select + * [HIVE-8046] - CBO: fix issues with Windowing queries + * [HIVE-8069] - CBO: RowResolver after SubQuery predicate handling should be reset to outer query block RR + * [HIVE-8111] - CBO trunk merge: duplicated casts for arithmetic expressions in Hive and CBO + * [HIVE-8125] - CBO Trunk Merge: On Failure Fall Back to Non CBO + * [HIVE-8144] - CBO: HiveProjectRel factory should create RelSubSets + * [HIVE-8145] - CBO: bail from Optiq planning if a Select list contains multiple references to the same name + * [HIVE-8159] - CBO: bail from Optiq planning if a Select list contains multiple references to the same name + * [HIVE-8168] - With dynamic partition enabled fact table selectivity is not taken into account when generating the physical plan (Use CBO cardinality using physical plan generation) + * [HIVE-8172] - HiveServer2 dynamic service discovery should let the JDBC client use default ZooKeeper namespace + * [HIVE-8173] - HiveServer2 dynamic service discovery: figure out best ZooKeeper ACLs for security + * [HIVE-8186] - Self join may fail if one side have virtual column(s) and other doesn't + * [HIVE-8193] - Hook HiveServer2 dynamic service discovery with session time out + * [HIVE-8194] - CBO: bail for having clause referring select expr aliases + * [HIVE-8199] - CBO Trunk Merge: quote2 test fails due to incorrect literal translation + * [HIVE-8223] - CBO Trunk Merge: partition_wise_fileformat2 select result depends on ordering + * [HIVE-8228] - CBO: fix couple of issues with partition pruning + * [HIVE-8237] - CBO: Use Fully qualified table name (db.tablename in ReloptHiveTable) + * [HIVE-8288] - HiveServer2 dynamic discovery should create znodes organized by version number & add support for removing server uri's of a particular version from the server script. + * [HIVE-8309] - CBO: Fix OB by removing constraining DT, Use external names for col Aliases, Remove unnecessary Selects, Make DT Name counter query specific + * [HIVE-8377] - Enable Kerberized SSL for HiveServer2 in http mode + * [HIVE-8454] - Select Operator does not rename column stats properly in case of select star + * [HIVE-8522] - CBO: Update Calcite Version to 0.9.2-incubating-SNAPSHOT + * [HIVE-8530] - CBO: Preserve types of literals + * [HIVE-8549] - NPE in PK-FK inference when one side of join is complex tree + * [HIVE-8580] - Support LateralViewJoinOperator and LateralViewForwardOperator in stats annotation + * [HIVE-8582] - CBO: Outer Join Simplification is broken + * [HIVE-8653] - CBO: Push Semi Join through, Project/Filter/Join + * [HIVE-8654] - CBO: parquet_ctas test returns incorrect results + * [HIVE-8655] - CBO: ppr_pushdown, udf_substr produces incorrect results due to broken tablesample handling + * [HIVE-8656] - CBO: auto_join_filters fails + * [HIVE-8657] - CBO: inputddl5, udf_reverse tests fail + * [HIVE-8662] - CBO: tez_dml fails + * [HIVE-8768] - CBO: Fix filter selectivity for "in clause" & "<>" + + + +** Bug + * [HIVE-1363] - 'SHOW TABLE EXTENDED LIKE' command does not strip single/double quotes + * [HIVE-1608] - use sequencefile as the default for storing intermediate results + * [HIVE-1879] - Remove hive.metastore.metadb.dir property from hive-default.xml and HiveConf + * [HIVE-2137] - JDBC driver doesn't encode string properly. 
+ * [HIVE-2390] - Add UNIONTYPE serialization support to LazyBinarySerDe + * [HIVE-2597] - Repeated key in GROUP BY is erroneously displayed when using DISTINCT + * [HIVE-3392] - Hive unnecessarily validates table SerDes when dropping a table + * [HIVE-3685] - TestCliDriver (script_pipe.q) failed with IBM JDK + * [HIVE-3925] - dependencies of fetch task are not shown by explain + * [HIVE-4064] - Handle db qualified names consistently across all HiveQL statements + * [HIVE-4118] - ANALYZE TABLE ... COMPUTE STATISTICS FOR COLUMNS fails when using fully qualified table name + * [HIVE-4274] - Table created using HCatalog java client doesn't set the owner + * [HIVE-4561] - Column stats : LOW_VALUE (or HIGH_VALUE) will always be 0.0000 ,if all the column values larger than 0.0 (or if all column values smaller than 0.0) + * [HIVE-4576] - templeton.hive.properties does not allow values with commas + * [HIVE-4577] - hive CLI can't handle hadoop dfs command with space and quotes. + * [HIVE-4723] - DDLSemanticAnalyzer.addTablePartsOutputs eats several exceptions + * [HIVE-4795] - Delete/Alter/Describe actions fail when SerDe is not on class path + * [HIVE-4965] - Add support so that PTFs can stream their output; Windowing PTF should do this + * [HIVE-5077] - Provide an option to run local task in process + * [HIVE-5092] - Fix hiveserver2 mapreduce local job on Windows + * [HIVE-5150] - UnsatisfiedLinkError when running hive unit tests on Windows + * [HIVE-5268] - HiveServer2 accumulates orphaned OperationHandle objects when a client fails while executing query + * [HIVE-5315] - Cannot attach debugger to Hiveserver2 + * [HIVE-5336] - HCatSchema.remove(HCatFieldSchema hcatFieldSchema) should renumber the fieldPositionMap and the fieldPositionMap should not be cached by the end user + * [HIVE-5376] - Hive does not honor type for partition columns when altering column type + * [HIVE-5434] - Creating a new HiveConnection does not handle the case when properties are supplied separately from connection string + * [HIVE-5456] - Queries fail on avro backed table with empty partition + * [HIVE-5607] - Hive fails to parse the "%" (mod) sign after brackets. 
+ * [HIVE-5631] - Index creation on a skew table fails + * [HIVE-5664] - Drop cascade database fails when the db has any tables with indexes + * [HIVE-5677] - Beeline warns about unavailable files if HIVE_OPTS is set + * [HIVE-5789] - WebHCat E2E tests do not launch on Windows + * [HIVE-5847] - DatabaseMetadata.getColumns() doesn't show correct column size for char/varchar/decimal + * [HIVE-5870] - Move TestJDBCDriver2.testNewConnectionConfiguration to TestJDBCWithMiniHS2 + * [HIVE-6035] - Windows: percentComplete returned by job status from WebHCat is null + * [HIVE-6093] - table creation should fail when user does not have permissions on db + * [HIVE-6149] - TestJdbcDriver2 is unable to drop a database created from previous runs ("hbasedb") + * [HIVE-6176] - Beeline gives bogus error message if an unaccepted command line option is given + * [HIVE-6187] - Add test to verify that DESCRIBE TABLE works with quoted table names + * [HIVE-6195] - Create unit tests to exercise behaviour when creating a HBase Table in Hive + * [HIVE-6200] - Hive custom SerDe cannot load DLL added by "ADD FILE" command + * [HIVE-6245] - HS2 creates DBs/Tables with wrong ownership when HMS setugi is true + * [HIVE-6305] - test use of quoted identifiers in user/role names + * [HIVE-6313] - Minimr tests in hadoop-1 hangs on shutdown + * [HIVE-6321] - hiveserver2 --help says Unrecognized option: -h + * [HIVE-6322] - Fix file_with_header_footer_negative.q + * [HIVE-6331] - HIVE-5279 deprecated UDAF class without explanation/documentation/alternative + * [HIVE-6374] - Hive job submitted with non-default name node (fs.default.name) doesn't process locations properly + * [HIVE-6437] - DefaultHiveAuthorizationProvider should not initialize a new HiveConf + * [HIVE-6446] - Ability to specify hadoop.bin.path from command line -D + * [HIVE-6447] - Bucket map joins in hive-tez + * [HIVE-6480] - Metastore server startup script ignores ENV settings + * [HIVE-6487] - PTest2 do not copy failed source directories + * [HIVE-6508] - Mismatched results between vector and non-vector mode with decimal field + * [HIVE-6511] - casting from decimal to tinyint,smallint, int and bigint generates different result when vectorization is on + * [HIVE-6515] - Custom vertex in hive-tez should be able to accept multiple MR-inputs + * [HIVE-6521] - WebHCat cannot fetch correct percentComplete for Hive jobs + * [HIVE-6531] - Runtime errors in vectorized execution. + * [HIVE-6538] - yet another annoying exception in test logs + * [HIVE-6549] - remove templeton.jar from webhcat-default.xml, remove hcatalog/bin/hive-config.sh + * [HIVE-6550] - SemanticAnalyzer.reset() doesn't clear all the state + * [HIVE-6555] - TestSchemaTool is failing on trunk after branching + * [HIVE-6560] - varchar and char types cannot be cast to binary + * [HIVE-6563] - hdfs jar being pulled in when creating a hadoop-2 based hive tar ball + * [HIVE-6564] - WebHCat E2E tests that launch MR jobs fail on check job completion timeout + * [HIVE-6569] - HCatalog still has references to deprecated property hive.metastore.local + * [HIVE-6570] - Hive variable substitution does not work with the "source" command + * [HIVE-6571] - query id should be available for logging during query compilation + * [HIVE-6583] - wrong sql comments : ----... instead of -- ---... 
+ * [HIVE-6586] - Update parameters in HiveConf.java after commit HIVE-6037 + * [HIVE-6592] - WebHCat E2E test abort when pointing to https url of webhdfs + * [HIVE-6594] - UnsignedInt128 addition does not increase internal int array count resulting in corrupted values during serialization + * [HIVE-6597] - WebHCat E2E tests doAsTests_6 and doAsTests_7 need to be updated + * [HIVE-6598] - Importing the project into eclipse as maven project have some issues + * [HIVE-6601] - alter database commands should support schema synonym keyword + * [HIVE-6602] - Multi-user HiveServer2 throws error + * [HIVE-6612] - Misspelling "schemaTool completeted" + * [HIVE-6620] - UDF printf doesn't take either CHAR or VARCHAR as the first argument + * [HIVE-6622] - UDF translate doesn't take either CHAR or VARCHAR as any of its arguments + * [HIVE-6637] - UDF in_file() doesn't take CHAR or VARCHAR as input + * [HIVE-6648] - Permissions are not inherited correctly when tables have multiple partition columns + * [HIVE-6652] - Beeline gives evasive error message for any unrecognized command line arguement + * [HIVE-6669] - sourcing txn-script from schema script results in failure for mysql & oracle + * [HIVE-6683] - Beeline does not accept comments at end of line + * [HIVE-6684] - Beeline does not accept comments that are preceded by spaces + * [HIVE-6695] - bin/hcat should include hbase jar and dependencies in the classpath [followup/clone of HCATALOG-621] + * [HIVE-6698] - hcat.py script does not correctly load the hbase storage handler jars + * [HIVE-6707] - Lazy maps are broken (LazyMap and LazyBinaryMap) + * [HIVE-6709] - HiveServer2 help command is not recognizing properly. + * [HIVE-6711] - ORC maps uses getMapSize() from MapOI which is unreliable + * [HIVE-6715] - Hive JDBC should include username into open session request for non-sasl connection + * [HIVE-6724] - HCatStorer throws ClassCastException while storing tinyint/smallint data + * [HIVE-6726] - Hcat cli does not close SessionState + * [HIVE-6727] - Table level stats for external tables are set incorrectly + * [HIVE-6741] - HiveServer2 startup fails in secure (kerberos) mode due to backward incompatible hadoop change + * [HIVE-6745] - HCat MultiOutputFormat hardcodes DistributedCache keynames + * [HIVE-6756] - alter table set fileformat should set serde too + * [HIVE-6768] - remove hcatalog/webhcat/svr/src/main/config/override-container-log4j.properties + * [HIVE-6773] - Update readme for ptest2 framework + * [HIVE-6782] - HiveServer2Concurrency issue when running with tez intermittently, throwing "org.apache.tez.dag.api.SessionNotRunning: Application not running" error + * [HIVE-6783] - Incompatible schema for maps between parquet-hive and parquet-pig + * [HIVE-6784] - parquet-hive should allow column type change + * [HIVE-6785] - query fails when partitioned table's table level serde is ParquetHiveSerDe and partition level serde is of different SerDe + * [HIVE-6788] - Abandoned opened transactions not being timed out + * [HIVE-6792] - hive.warehouse.subdir.inherit.perms doesn't work correctly in CTAS + * [HIVE-6793] - DDLSemanticAnalyzer.analyzeShowRoles() should use HiveAuthorizationTaskFactory + * [HIVE-6807] - add HCatStorer ORC test to test missing columns + * [HIVE-6811] - LOAD command does not work with relative paths on Windows + * [HIVE-6817] - Some hadoop2-only tests need diffs to be updated + * [HIVE-6820] - HiveServer(2) ignores HIVE_OPTS + * [HIVE-6822] - TestAvroSerdeUtils fails with -Phadoop-2 + * [HIVE-6824] - Hive HBase query 
fails on Tez due to missing jars - part 2 + * [HIVE-6826] - Hive-tez has issues when different partitions work off of different input types + * [HIVE-6828] - Hive tez bucket map join conversion interferes with map join conversion + * [HIVE-6835] - Reading of partitioned Avro data fails if partition schema does not match table schema + * [HIVE-6843] - INSTR for UTF-8 returns incorrect position + * [HIVE-6847] - Improve / fix bugs in Hive scratch dir setup + * [HIVE-6853] - show create table for hbase tables should exclude LOCATION + * [HIVE-6858] - Unit tests decimal_udf.q, vectorization_div0.q fail with jdk-7. + * [HIVE-6861] - more hadoop2 only golden files to fix + * [HIVE-6862] - add DB schema DDL and upgrade 12to13 scripts for MS SQL Server + * [HIVE-6868] - Create table in HCatalog sets different SerDe defaults than what is set through the CLI + * [HIVE-6870] - Fix maven.repo.local setting in Hive build + * [HIVE-6871] - Build fixes to allow Windows to run TestCliDriver + * [HIVE-6877] - TestOrcRawRecordMerger is deleting test.tmp.dir + * [HIVE-6880] - TestHWISessionManager fails with -Phadoop-2 + * [HIVE-6883] - Dynamic partitioning optimization does not honor sort order or order by + * [HIVE-6884] - HiveLockObject and enclosed HiveLockObjectData override equal() method but didn't do so for hashcode() + * [HIVE-6888] - Hive leaks MapWork objects via Utilities::gWorkMap + * [HIVE-6890] - Bug in HiveStreaming API causes problems if hive-site.xml is missing on streaming client side + * [HIVE-6891] - Alter rename partition Perm inheritance and general partition/table group inheritance + * [HIVE-6893] - out of sequence error in HiveMetastore server + * [HIVE-6898] - Functions in hive are failing with java.lang.ClassNotFoundException on Tez + * [HIVE-6900] - HostUtil.getTaskLogUrl signature change causes compilation to fail + * [HIVE-6901] - Explain plan doesn't show operator tree for the fetch operator + * [HIVE-6908] - TestThriftBinaryCLIService.testExecuteStatementAsync has intermittent failures + * [HIVE-6910] - Invalid column access info for partitioned table + * [HIVE-6913] - Hive unable to find the hashtable file during complex multi-staged map join + * [HIVE-6915] - Hive Hbase queries fail on secure Tez cluster + * [HIVE-6916] - Export/import inherit permissions from parent directory + * [HIVE-6919] - hive sql std auth select query fails on partitioned tables + * [HIVE-6921] - index creation fails with sql std auth turned on + * [HIVE-6922] - NullPointerException in collect_set() UDAF + * [HIVE-6927] - Add support for MSSQL in schematool + * [HIVE-6928] - Beeline should not chop off "describe extended" results by default + * [HIVE-6931] - Windows unit test fixes + * [HIVE-6932] - hive README needs update + * [HIVE-6934] - PartitionPruner doesn't handle top level constant expression correctly + * [HIVE-6936] - Provide table properties to InputFormats + * [HIVE-6937] - Fix test reporting url's after jenkins move from bigtop + * [HIVE-6939] - TestExecDriver.testMapRedPlan3 fails on hadoop-2 + * [HIVE-6944] - WebHCat e2e tests broken by HIVE-6432 + * [HIVE-6945] - issues with dropping partitions on Oracle + * [HIVE-6946] - Make it easier to run WebHCat e2e tests + * [HIVE-6947] - More fixes for tests on hadoop-2 + * [HIVE-6952] - Hive 0.13 HiveOutputFormat breaks backwards compatibility + * [HIVE-6954] - After ALTER FILEFORMAT, DESCRIBE throwing exception + * [HIVE-6955] - ExprNodeColDesc isSame doesn't account for tabAlias: this affects trait Propagation in Joins + * [HIVE-6956] - 
Duplicate partitioning column for union when dynamic partition sort optimization is enabled + * [HIVE-6957] - SQL authorization does not work with HS2 binary mode and Kerberos auth + * [HIVE-6959] - Enable Constant propagation optimizer for Hive Vectorization + * [HIVE-6960] - Set Hive pom to use Hadoop-2.4 + * [HIVE-6961] - Drop partitions treats partition columns as strings + * [HIVE-6965] - Transaction manager should use RDBMS time instead of machine time + * [HIVE-6966] - More fixes for TestCliDriver on Windows + * [HIVE-6967] - Hive transaction manager fails when SQLServer is used as an RDBMS + * [HIVE-6968] - list bucketing feature does not update the location map for unpartitioned tables + * [HIVE-6972] - jdbc HTTP configuration options should be part of sessionConf part of connection string + * [HIVE-6976] - Show query id only when there's jobs on the cluster + * [HIVE-6978] - beeline always exits with 0 status, should exit with non-zero status on error + * [HIVE-6979] - Hadoop-2 test failures related to quick stats not being populated correctly + * [HIVE-6984] - Analyzing partitioned table with NULL values for the partition column failed with NPE + * [HIVE-6985] - sql std auth - privileges grants to public role not being honored + * [HIVE-6986] - MatchPath fails with small resultExprString + * [HIVE-6987] - Metastore qop settings won't work with Hadoop-2.4 + * [HIVE-6989] - Error with arithmetic operators with javaXML serialization + * [HIVE-6990] - Direct SQL fails when the explicit schema setting is different from the default one + * [HIVE-6994] - parquet-hive createArray strips null elements + * [HIVE-6995] - GenericUDFBridge should log exception when it is unable to instantiate UDF object + * [HIVE-6996] - FS based stats broken with indexed tables + * [HIVE-7001] - fs.permissions.umask-mode is getting unset when Session is started + * [HIVE-7003] - Fix typo in README + * [HIVE-7004] - Fix more unit test failures on hadoop-2 + * [HIVE-7005] - MiniTez tests have non-deterministic explain plans + * [HIVE-7006] - Fix ql_rewrite_gbtoidx.q output file + * [HIVE-7009] - HIVE_USER_INSTALL_DIR could not bet set to non-HDFS filesystem + * [HIVE-7011] - HiveInputFormat's split generation isn't thread safe + * [HIVE-7012] - Wrong RS de-duplication in the ReduceSinkDeDuplication Optimizer + * [HIVE-7015] - Failing to inherit group/permission should not fail the operation + * [HIVE-7016] - Hive returns wrong results when execute UDF on top of DISTINCT column + * [HIVE-7017] - Insertion into Parquet tables fails under Tez + * [HIVE-7021] - HiveServer2 memory leak on failed queries + * [HIVE-7023] - Bucket mapjoin is broken when the number of small aliases is two or more + * [HIVE-7027] - Hive job fails when referencing a view that explodes an array + * [HIVE-7030] - Remove hive.hadoop.classpath from hiveserver2.cmd + * [HIVE-7031] - Utiltites.createEmptyFile uses File.Separator instead of Path.Separator to create an empty file in HDFS + * [HIVE-7033] - grant statements should check if the role exists + * [HIVE-7035] - Templeton returns 500 for user errors - when job cannot be found + * [HIVE-7037] - Add additional tests for transform clauses with Tez + * [HIVE-7041] - DoubleWritable/ByteWritable should extend their hadoop counterparts + * [HIVE-7042] - Fix stats_partscan_1_23.q and orc_createas1.q for hadoop-2 + * [HIVE-7043] - When using the tez session pool via hive, once sessions time out, all queries go to the default queue + * [HIVE-7045] - Wrong results in multi-table insert 
aggregating without group by clause + * [HIVE-7050] - Display table level column stats in DESCRIBE FORMATTED TABLE + * [HIVE-7051] - Display partition level column stats in DESCRIBE FORMATTED PARTITION + * [HIVE-7052] - Optimize split calculation time + * [HIVE-7053] - Unable to fetch column stats from decimal columns + * [HIVE-7055] - config not propagating for PTFOperator + * [HIVE-7057] - webhcat e2e deployment scripts don't have x bit set + * [HIVE-7060] - Column stats give incorrect min and distinct_count + * [HIVE-7061] - sql std auth - insert queries without overwrite should not require delete privileges + * [HIVE-7062] - Support Streaming mode in Windowing + * [HIVE-7063] - Optimize for the Top N within a Group use case + * [HIVE-7065] - Hive jobs in webhcat run in default mr mode even in Hive on Tez setup + * [HIVE-7066] - hive-exec jar is missing avro core + * [HIVE-7067] - Min() and Max() on Timestamp and Date columns for ORC returns wrong results + * [HIVE-7071] - Use custom Tez split generator to support schema evolution + * [HIVE-7072] - HCatLoader only loads first region of hbase table + * [HIVE-7075] - JsonSerde raises NullPointerException when object key is not lower case + * [HIVE-7076] - Plugin (exec hook) to log to application timeline data to Yarn + * [HIVE-7077] - Hive contrib compilation maybe broken with removal of org.apache.hadoop.record + * [HIVE-7079] - Hive logs errors about missing tables when parsing CTE expressions + * [HIVE-7080] - In PTest framework, Add logs URL to the JIRA comment + * [HIVE-7082] - Vectorized parquet reader should create assigners only for the columns it assigns, not for scratch columns + * [HIVE-7083] - Fix test failures on trunk + * [HIVE-7087] - Remove lineage information after query completion + * [HIVE-7092] - Insert overwrite should not delete the original directory + * [HIVE-7096] - Support grouped splits in Tez partitioned broadcast join + * [HIVE-7099] - Add Decimal datatype support for Windowing + * [HIVE-7104] - Unit tests are disabled + * [HIVE-7105] - Enable ReduceRecordProcessor to generate VectorizedRowBatches + * [HIVE-7107] - Fix HiveServer1 JDBC Driver spec compliancy issue + * [HIVE-7109] - Resource leak in HBaseStorageHandler + * [HIVE-7112] - Tez processor swallows errors + * [HIVE-7114] - Extra Tez session is started during HiveServer2 startup + * [HIVE-7116] - HDFS FileSystem object cache causes permission issues in creating tmp directories + * [HIVE-7117] - Partitions not inheriting table permissions after alter rename partition + * [HIVE-7118] - Oracle upgrade schema scripts do not map Java long datatype columns correctly for transaction related tables + * [HIVE-7119] - Extended ACL's should be inherited if warehouse perm inheritance enabled + * [HIVE-7123] - Follow-up of HIVE-6367 + * [HIVE-7130] - schematool is broken for minor version upgrades (eg 0.13.x) + * [HIVE-7131] - Dependencies of fetch task for tez are not shown properly + * [HIVE-7135] - Fix test fail of TestTezTask.testSubmit + * [HIVE-7143] - Add Streaming support in Windowing mode for more UDAFs (min/max, lead/lag, fval/lval) + * [HIVE-7144] - GC pressure during ORC StringDictionary writes + * [HIVE-7146] - posexplode() UDTF fails with a NullPointerException on NULL columns + * [HIVE-7147] - ORC PPD should handle CHAR/VARCHAR types + * [HIVE-7149] - Parquet not able to handle negative decimal numbers + * [HIVE-7154] - TestMetrics fails intermittently on the trunk + * [HIVE-7155] - WebHCat controller job exceeds container memory limit + * 
[HIVE-7159] - For inner joins push a 'is not null predicate' to the join sources for every non nullSafe join condition + * [HIVE-7161] - TestMetastoreVersion fails intermittently on trunk + * [HIVE-7162] - hadoop-1 build broken by HIVE-7071 + * [HIVE-7165] - Fix hive-default.xml.template errors & omissions + * [HIVE-7167] - Hive Metastore fails to start with SQLServerException + * [HIVE-7169] - HiveServer2 in Http Mode should have a configurable IdleMaxTime timeout + * [HIVE-7170] - Fix display_colstats_tbllvl.q in trunk + * [HIVE-7173] - Support HIVE-4867 on mapjoin of MR Tasks + * [HIVE-7174] - Do not accept string as scale and precision when reading Avro schema + * [HIVE-7176] - FileInputStream is not closed in Commands#properties() + * [HIVE-7182] - ResultSet is not closed in JDBCStatsPublisher#init() + * [HIVE-7183] - Size of partColumnGrants should be checked in ObjectStore#removeRole() + * [HIVE-7187] - Reconcile jetty versions in hive + * [HIVE-7188] - sum(if()) returns wrong results with vectorization + * [HIVE-7190] - WebHCat launcher task failure can cause two concurent user jobs to run + * [HIVE-7191] - optimized map join hash table has a bug when it reaches 2Gb + * [HIVE-7192] - Hive Streaming - Some required settings are not mentioned in the documentation + * [HIVE-7199] - Cannot alter table to parquet + * [HIVE-7200] - Beeline output displays column heading even if --showHeader=false is set + * [HIVE-7201] - Fix TestHiveConf#testConfProperties test case + * [HIVE-7202] - DbTxnManager deadlocks in hcatalog.cli.TestSematicAnalysis.testAlterTblFFpart() + * [HIVE-7209] - allow metastore authorization api calls to be restricted to certain invokers + * [HIVE-7210] - NPE with "No plan file found" when running Driver instances on multiple threads + * [HIVE-7213] - COUNT(*) returns out-dated count value after TRUNCATE + * [HIVE-7220] - Empty dir in external table causes issue (root_dir_external_table.q failure) + * [HIVE-7225] - Unclosed Statement's in TxnHandler + * [HIVE-7226] - Windowing Streaming mode causes NPE for empty partitions + * [HIVE-7228] - StreamPrinter should be joined to calling thread + * [HIVE-7229] - String is compared using equal in HiveMetaStore#HMSHandler#init() + * [HIVE-7232] - VectorReduceSink is emitting incorrect JOIN keys + * [HIVE-7234] - Select on decimal column throws NPE + * [HIVE-7235] - TABLESAMPLE on join table is regarded as alias + * [HIVE-7236] - Tez progress monitor should indicate running/failed tasks + * [HIVE-7237] - hive.exec.parallel=true w/ Hive 0.13/Tez causes application to linger forever + * [HIVE-7241] - Wrong lock acquired for alter table rename partition + * [HIVE-7242] - alter table drop partition is acquiring the wrong type of lock + * [HIVE-7245] - Fix parquet_columnar + * [HIVE-7246] - Hive transaction manager hardwires bonecp as the JDBC pooling implementation + * [HIVE-7247] - Fix itests using hadoop-1 profile + * [HIVE-7249] - HiveTxnManager.closeTxnManger() throws if called after commitTxn() + * [HIVE-7251] - Fix StorageDescriptor usage in unit tests + * [HIVE-7257] - UDF format_number() does not work on FLOAT types + * [HIVE-7263] - Missing fixes from review of parquet-timestamp + * [HIVE-7265] - BINARY columns use BytesWritable::getBytes() without ::getLength() + * [HIVE-7268] - On Windows Hive jobs in Webhcat always run on default MR mode + * [HIVE-7271] - Speed up unit tests + * [HIVE-7274] - Update PTest2 to JClouds 1.7.3 + * [HIVE-7279] - UDF format_number() does not work on DECIMAL types + * [HIVE-7281] - DbTxnManager 
acquiring wrong level of lock for dynamic partitioning + * [HIVE-7282] - HCatLoader fail to load Orc map with null key + * [HIVE-7287] - hive --rcfilecat command is broken on Windows + * [HIVE-7294] - sql std auth - authorize show grant statements + * [HIVE-7298] - desc database extended does not show properties of the database + * [HIVE-7302] - Allow Auto-reducer parallelism to be turned off by a logical optimizer + * [HIVE-7303] - IllegalMonitorStateException when stmtHandle is null in HiveStatement + * [HIVE-7304] - Transitive Predicate Propagation doesn't happen properly after HIVE-7159 + * [HIVE-7314] - Wrong results of UDF when hive.cache.expr.evaluation is set + * [HIVE-7317] - authorization_explain.q fails when run in sequence + * [HIVE-7323] - Date type stats in ORC sometimes go stale + * [HIVE-7325] - Support non-constant expressions for ARRAY/MAP type indices. + * [HIVE-7326] - Hive complains invalid column reference with 'having' aggregate predicates + * [HIVE-7339] - hive --orcfiledump command is not supported on Windows + * [HIVE-7342] - support hiveserver2,metastore specific config files + * [HIVE-7344] - Add streaming support in Windowing mode for FirstVal, LastVal + * [HIVE-7345] - Beeline changes its prompt to reflect successful database connection even after failing to connect + * [HIVE-7346] - Wrong results caused by hive ppd under specific join condition + * [HIVE-7352] - Queries without tables fail under Tez + * [HIVE-7353] - HiveServer2 using embedded MetaStore leaks JDOPersistanceManager + * [HIVE-7354] - windows:Need to set hbase jars in hadoop classpath explicitly + * [HIVE-7356] - Table level stats collection fail for partitioned tables + * [HIVE-7359] - Stats based compute query replies fail to do simple column transforms + * [HIVE-7363] - VectorExpressionWriterDecimal is missing null check in setValue() + * [HIVE-7366] - getDatabase using direct sql + * [HIVE-7373] - Hive should not remove trailing zeros for decimal numbers + * [HIVE-7374] - SHOW COMPACTIONS fail with remote metastore when there are no compations + * [HIVE-7376] - add minimizeJar to jdbc/pom.xml + * [HIVE-7385] - Optimize for empty relation scans + * [HIVE-7389] - Reduce number of metastore calls in MoveTask (when loading dynamic partitions) + * [HIVE-7393] - Tez jobs sometimes fail with NPE processing input splits + * [HIVE-7394] - ORC writer logging fails when the padding is < 0.01 + * [HIVE-7396] - BucketingSortingReduceSinkOptimizer throws NullPointException during ETL + * [HIVE-7397] - Set the default threshold for fetch task conversion to 1Gb + * [HIVE-7399] - Timestamp type is not copied by ObjectInspectorUtils.copyToStandardObject + * [HIVE-7409] - Add workaround for a deadlock issue of Class.getAnnotation() + * [HIVE-7412] - column stats collection throws exception if all values for a column is null + * [HIVE-7414] - Update golden file for MiniTez temp_table.q + * [HIVE-7415] - Test TestMinimrCliDriver.testCliDriver_ql_rewrite_gbtoidx failing + * [HIVE-7417] - select count(1) from ... 
where true; fails in optimizer + * [HIVE-7419] - Missing break in SemanticAnalyzer#getTableDescFromSerDe() + * [HIVE-7421] - Make VectorUDFDateString use the same date parsing and formatting as GenericUDFDate + * [HIVE-7422] - Array out of bounds exception involving ql.exec.vector.expressions.aggregates.gen.VectorUDAFAvgDouble + * [HIVE-7423] - produce hive-exec-core.jar from ql module + * [HIVE-7424] - HiveException: Error evaluating concat(concat(' ', str2), ' ') in ql.exec.vector.VectorSelectOperator.processOp + * [HIVE-7426] - ClassCastException: ...IntWritable cannot be cast to ...Text involving ql.udf.generic.GenericUDFBasePad.evaluate + * [HIVE-7429] - Set replication for archive called before file exists + * [HIVE-7433] - ColumnMappins.ColumnMapping should expose public accessors for its fields + * [HIVE-7441] - Custom partition scheme gets rewritten with hive scheme upon concatenate + * [HIVE-7450] - Database should inherit perms of warehouse dir + * [HIVE-7451] - pass function name in create/drop function to authorization api + * [HIVE-7452] - Boolean comparison is done through reference equality rather than using equals + * [HIVE-7459] - Fix NPE when an empty file is included in a Hive query that uses CombineHiveInputFormat + * [HIVE-7470] - Wrong Thrift declaration for {{ShowCompactResponseElement}} + * [HIVE-7472] - CLONE - Import fails for tables created with default text, sequence and orc file formats using HCatalog API + * [HIVE-7473] - Null values in DECIMAL columns cause serialization issues with HCatalog + * [HIVE-7475] - Beeline requires newline at the end of each query in a file + * [HIVE-7481] - The planning side changes for SMB join on hive-tez + * [HIVE-7482] - The execution side changes for SMB join in hive-tez + * [HIVE-7486] - Delete jar should close current classloader + * [HIVE-7488] - pass column names being used for inputs to authorization api + * [HIVE-7490] - Revert ORC stripe size + * [HIVE-7494] - ORC returns empty rows for constant folded date queries + * [HIVE-7508] - Kerberos support for streaming + * [HIVE-7514] - Vectorization does not handle constant expression whose value is NULL + * [HIVE-7521] - Reference equality is used on Boolean in NullScanOptimizer#WhereFalseProcessor#process() + * [HIVE-7522] - Update .q.out for cluster_tasklog_retrieval.q test + * [HIVE-7529] - load data query fails on hdfs federation + viewfs + * [HIVE-7531] - auxpath parameter does not handle paths relative to current working directory. 
+ * [HIVE-7533] - sql std auth - set authorization privileges for tables when created from hive cli + * [HIVE-7538] - Fix eclipse:eclipse after HIVE-7496 + * [HIVE-7539] - streaming windowing UDAF seems to be broken without Partition Spec + * [HIVE-7553] - avoid the scheduling maintenance window for every jar change + * [HIVE-7557] - When reduce is vectorized, dynpart_sort_opt_vectorization.q under Tez fails + * [HIVE-7558] - HCatLoader reuses credentials across jobs + * [HIVE-7563] - ClassLoader should be released from LogFactory + * [HIVE-7574] - CommonJoinOperator.checkAndGenObject calls LOG.Trace per row from probe side in a HashMap join consuming 4% of the CPU + * [HIVE-7576] - Add PartitionSpec support in HCatClient API + * [HIVE-7579] - error message for 'drop admin role' in sql std auth mode is not informative + * [HIVE-7583] - Use FileSystem.access() if available to check file access for user + * [HIVE-7592] - List Jars or Files are not supported by Beeline + * [HIVE-7595] - isKerberosMode() does a case sensitive comparison + * [HIVE-7599] - NPE in MergeTask#main() when -format is absent + * [HIVE-7600] - ConstantPropagateProcFactory uses reference equality on Boolean + * [HIVE-7618] - TestDDLWithRemoteMetastoreSecondNamenode unit test failure + * [HIVE-7620] - Hive metastore fails to start in secure mode due to "java.lang.NoSuchFieldError: SASL_PROPS" error + * [HIVE-7623] - hive partition rename fails if filesystem cache is disabled + * [HIVE-7629] - Problem in SMB Joins between two Parquet tables + * [HIVE-7634] - Use Configuration.getPassword() if available to eliminate passwords from hive-site.xml + * [HIVE-7635] - Query having same aggregate functions but different case throws IndexOutOfBoundsException + * [HIVE-7637] - Change throws clause for Hadoop23Shims.ProxyFileSystem23.access() + * [HIVE-7638] - Disallow CREATE VIEW when created with a temporary table + * [HIVE-7645] - Hive CompactorMR job set NUM_BUCKETS mistake + * [HIVE-7647] - Beeline does not honor --headerInterval and --color when executing with "-e" + * [HIVE-7648] - authorization check api should provide table for create table,drop/create index, and db for create/switch db + * [HIVE-7649] - Support column stats with temporary tables + * [HIVE-7658] - Hive search order for hive-site.xml when using --config option + * [HIVE-7664] - VectorizedBatchUtil.addRowToBatchFrom is not optimized for Vectorized execution and takes 25% CPU + * [HIVE-7666] - Join selectivity calculation should use exponential back-off for conjunction predicates + * [HIVE-7667] - handle cast for long in get_aggr_stats() api for metastore for mysql + * [HIVE-7669] - parallel order by clause on a string column fails with IOException: Split points are out of order + * [HIVE-7673] - Authorization api: missing privilege objects in create table/view + * [HIVE-7676] - JDBC: Support more DatabaseMetaData, ResultSetMetaData methods + * [HIVE-7678] - add more test cases for tables qualified with database/schema name + * [HIVE-7680] - Do not throw SQLException for HiveStatement getMoreResults and setEscapeProcessing(false) + * [HIVE-7681] - qualified tablenames usage does not work with several alter-table commands + * [HIVE-7682] - HadoopThriftAuthBridge20S should not reset configuration unless required + * [HIVE-7683] - Test TestMinimrCliDriver.testCliDriver_ql_rewrite_gbtoidx is still failing + * [HIVE-7694] - SMB join on tables differing by number of sorted by columns with same join prefix fails + * [HIVE-7695] - hive stats issue when insert query is 
appending data into table + * [HIVE-7700] - authorization api - HivePrivilegeObject for permanent function should have database name set + * [HIVE-7701] - Upgrading tez to 0.4.1 causes metadata only query to fail. + * [HIVE-7704] - Create tez task for fast file merging + * [HIVE-7710] - Rename table across database might fail + * [HIVE-7712] - hive-exec-0.13.0.2.1.2.0-402.jar contains avro classes compiled against hadoop-v1 + * [HIVE-7722] - TestJdbcDriver2.testDatabaseMetaData fails after HIVE-7676 + * [HIVE-7723] - Explain plan for complex query with lots of partitions is slow due to in-efficient collection used to find a matching ReadEntity + * [HIVE-7730] - Extend ReadEntity to add accessed columns from query + * [HIVE-7733] - Ambiguous column reference error on query + * [HIVE-7738] - tez select sum(decimal) from union all of decimal and null throws NPE + * [HIVE-7741] - Don't synchronize WriterImpl.addRow() when dynamic.partition is enabled + * [HIVE-7744] - In Windowing Streaming mode Avg and Sum give incorrect results when Wdw size is same as partition size + * [HIVE-7753] - Same operand appears on both sides of > in DataType#compareByteArray() + * [HIVE-7759] - document hive cli authorization behavior when SQL std auth is enabled + * [HIVE-7760] - Constants in VirtualColumn should be final + * [HIVE-7764] - Support all JDBC-HiveServer2 authentication modes on a secure cluster + * [HIVE-7769] - add --SORT_BEFORE_DIFF to union all .q tests + * [HIVE-7770] - Undo backward-incompatible behaviour change introduced by HIVE-7341 + * [HIVE-7771] - ORC PPD fails for some decimal predicates + * [HIVE-7774] - Issues with location path for temporary external tables + * [HIVE-7777] - Add CSV Serde based on OpenCSV + * [HIVE-7784] - Created the needed indexes on Hive.PART_COL_STATS for CBO + * [HIVE-7786] - add --SORT_BEFORE_DIFF to union all tez .q.out files + * [HIVE-7787] - Reading Parquet file with enum in Thrift Encoding throws NoSuchFieldError + * [HIVE-7800] - Parquet Column Index Access Schema Size Checking + * [HIVE-7807] - Refer to umask property using FsPermission.UMASK_LABEL. 
+ * [HIVE-7812] - Disable CombineHiveInputFormat when ACID format is used + * [HIVE-7813] - Hive join key not null shouldn't be generated for partition column + * [HIVE-7823] - HIVE-6185 removed Partition.getPartition + * [HIVE-7824] - CLIServer.getOperationStatus eats ExceutionException + * [HIVE-7828] - TestCLIDriver.parquet_join.q is failing on trunk + * [HIVE-7829] - Entity.getLocation can throw an NPE + * [HIVE-7834] - Use min, max and NDV from the stats to better estimate many to many vs one to many inner joins + * [HIVE-7840] - Generated hive-default.xml.template mistakenly refers to property "name"s as "key"s + * [HIVE-7841] - Case, When, Lead, Lag UDF is missing annotation + * [HIVE-7846] - authorization api should support group, not assume case insensitive role names + * [HIVE-7847] - query orc partitioned table fail when table column type change + * [HIVE-7851] - Fix NPE in split generation on Tez 0.5 + * [HIVE-7857] - Hive query fails after Tez session times out + * [HIVE-7859] - Tune zlib compression in ORC to account for the encoding strategy + * [HIVE-7863] - Potential null reference in TxnDbUtil#prepareDb() + * [HIVE-7865] - Extend TestFileDump test case to printout ORC row index information + * [HIVE-7878] - add -- SORT_BEFORE_DIFF to optimize_nullscan.q test + * [HIVE-7883] - DBTxnManager trying to close already closed metastore client connection + * [HIVE-7887] - VectorFileSinkOp does not publish the stats correctly + * [HIVE-7889] - Query fails with char partition column + * [HIVE-7890] - SessionState creates HMS Client while not impersonating + * [HIVE-7891] - Table-creation fails through HCatClient for Oracle-based metastore. + * [HIVE-7892] - Thrift Set type not working with Hive + * [HIVE-7895] - Storage based authorization should consider sticky bit for drop actions + * [HIVE-7897] - ObjectStore not using getPassword() for JDO connection string + * [HIVE-7899] - txnMgr should be session specific + * [HIVE-7901] - CLONE - pig -useHCatalog with embedded metastore fails to pass command line args to metastore (org.apache.hive.hcatalog version) + * [HIVE-7902] - Cleanup hbase-handler/pom.xml dependency list + * [HIVE-7911] - Guaranteed ClassCastException in AccumuloRangeGenerator + * [HIVE-7913] - Simplify filter predicates for CBO + * [HIVE-7914] - Simplify join predicates for CBO to avoid cross products + * [HIVE-7915] - Expose High and Low value in plan.ColStatistics + * [HIVE-7919] - sql std auth: user with 'admin option' for role should be able to list all users in the role + * [HIVE-7927] - Checking sticky bit needs shim + * [HIVE-7936] - Support for handling Thrift Union types + * [HIVE-7943] - hive.security.authorization.createtable.owner.grants is ineffective with Default Authorization + * [HIVE-7944] - current update stats for columns of a partition of a table is not correct + * [HIVE-7946] - CBO: Merge CBO changes to Trunk + * [HIVE-7949] - Create table LIKE command doesn't set new owner + * [HIVE-7950] - StorageHandler resources aren't added to Tez Session if already Session is already Open + * [HIVE-7957] - Revisit event version handling in dynamic partition pruning on Tez + * [HIVE-7971] - Support alter table change/replace/add columns for existing partitions + * [HIVE-7972] - hiveserver2 specific configuration file is not getting used + * [HIVE-7976] - Merge tez branch into trunk (tez 0.5.0) + * [HIVE-7982] - Regression in explain with CBO enabled due to issuing query per K,V for the stats + * [HIVE-7984] - AccumuloOutputFormat Configuration items from 
StorageHandler not re-set in Configuration in Tez + * [HIVE-7985] - With CBO enabled cross product is generated when a subquery is present + * [HIVE-7987] - Storage based authorization - NPE for drop view + * [HIVE-7993] - With CBO enabled Q75 fails with RuntimeException: cannot find field _col69 from [0:_col18,...] + * [HIVE-8002] - Deprecate the use of JDBC client only parameters from the map used to transfer HiveConf configs to the server. + * [HIVE-8008] - NPE while reading null decimal value + * [HIVE-8012] - TestHiveServer2Concurrency is not implemented + * [HIVE-8018] - Fix typo in config var name for dynamic partition pruning + * [HIVE-8019] - Missing hive 0.13.1 commit in trunk : export/import statement authorization - CVE-2014-0228 + * [HIVE-8022] - Recursive root scratch directory creation is not using hdfs umask properly + * [HIVE-8023] - Code in HIVE-6380 eats exceptions + * [HIVE-8031] - CBO needs to scale down NDV with selectivity to avoid underestimating + * [HIVE-8034] - Don't add colon when no port is specified + * [HIVE-8041] - Hadoop-2 build is broken with JDK6 + * [HIVE-8044] - Container size and hash table size should be taken into account before deciding to do a MapJoin + * [HIVE-8045] - SQL standard auth with cli - Errors and configuration issues + * [HIVE-8047] - Lazy char/varchar are not using escape char defined in serde params + * [HIVE-8051] - Some union queries fail with dynamic partition pruning on tez + * [HIVE-8052] - Vectorization: min() on TimeStamp datatype fails with error "Vector aggregate not implemented: min for type: TIMESTAMP" + * [HIVE-8056] - SessionState.dropSessionPaths should use FileSystem.getLocal(conf) to delete local files + * [HIVE-8062] - Stats collection for columns fails on a partitioned table with null values in partitioning column + * [HIVE-8071] - hive shell tries to write hive-exec.jar for each run + * [HIVE-8078] - ORC Delta encoding corrupts data when delta overflows long + * [HIVE-8081] - "drop index if exists" fails if table specified does not exist + * [HIVE-8082] - generateErrorMessage doesn't handle null ast properly + * [HIVE-8083] - Authorization DDLs should not enforce hive identifier syntax for user or group + * [HIVE-8085] - stats optimizer should not use Description annotation to figure out function mapping (because FunctionRegistry doesn't) + * [HIVE-8090] - Potential null pointer reference in WriterImpl#StreamFactory#createStream() + * [HIVE-8092] - Vectorized Tez count(*) returns NULL instead of 0 when result is empty + * [HIVE-8095] - Tez and Vectorized GROUP BY: ClassCastException: ...HiveDecimal cannot be cast to ...HiveDecimalWritable + * [HIVE-8099] - IN operator for partition column fails when the partition column type is DATE + * [HIVE-8101] - Hive on spark Issue during compiling hive source + * [HIVE-8102] - Partitions of type 'date' behave incorrectly with daylight saving time. 
+ * [HIVE-8103] - Read ACID tables with FetchOperator returns no rows + * [HIVE-8104] - Insert statements against ACID tables NPE when vectorization is on + * [HIVE-8105] - booleans and nulls not handled properly in insert/values + * [HIVE-8107] - Bad error message for non-existent table in update and delete + * [HIVE-8112] - Change reporting string to reflect update in Tez + * [HIVE-8114] - Type resolution for udf arguments of Decimal Type results in error + * [HIVE-8115] - Hive select query hang when fields contain map + * [HIVE-8126] - Standalone hive-jdbc jar is not packaged in the Hive distribution + * [HIVE-8138] - Global Init file should allow specifying file name not only directory + * [HIVE-8139] - Upgrade commons-lang from 2.4 to 2.6 + * [HIVE-8142] - Add merge operators to queryplan.thrift instead of generated source file + * [HIVE-8143] - Create root scratch dir with 733 instead of 777 perms + * [HIVE-8146] - Test TestTempletonUtils.testFindContainingJar failing + * [HIVE-8148] - HDFS Path named with file:// instead of file:/// results in Unit test failures in Windows + * [HIVE-8149] - hive.optimize.reducededuplication should be set to false for IUD ops + * [HIVE-8151] - Dynamic partition sort optimization inserts record wrongly to partition when used with GroupBy + * [HIVE-8152] - Update with expression in set fails + * [HIVE-8153] - Reduce the verbosity of debug logs in ORC record reader + * [HIVE-8154] - HadoopThriftAuthBridge20S.getHadoopSaslProperties is incompatible with Hadoop 2.4.1 and later + * [HIVE-8156] - Vectorized reducers need to avoid memory build-up during a single key + * [HIVE-8158] - Optimize writeValue/setValue in VectorExpressionWriterFactory (in VectorReduceSinkOperator codepath) + * [HIVE-8162] - Dynamic sort optimization propagates additional columns even in the absence of order by + * [HIVE-8167] - mvn install command broken by HIVE-8126 commit + * [HIVE-8169] - Windows: alter table ..set location from hcatalog failed with NullPointerException + * [HIVE-8170] - Hive Metastore schema script missing for mssql for v0.14.0 + * [HIVE-8171] - Tez and Vectorized Reduce doesn't create scratch columns + * [HIVE-8175] - Hive metastore upgrade from v0.13.0 to v0.14.0 script for Oracle is missing an upgrade step + * [HIVE-8178] - OrcNewInputFormat::getSplits() calls OrcInputFormat.generateSplitsInfo twice + * [HIVE-8179] - Fetch task conversion: Remove some dependencies on AST + * [HIVE-8184] - inconsistence between colList and columnExprMap when ConstantPropagate is applied to subquery + * [HIVE-8185] - hive-jdbc-0.14.0-SNAPSHOT-standalone.jar fails verification for signatures in build + * [HIVE-8188] - ExprNodeGenericFuncEvaluator::_evaluate() loads class annotations in a tight loop + * [HIVE-8189] - A select statement with a subquery is failing with HBaseSerde + * [HIVE-8191] - Update and delete on tables with non Acid output formats gives runtime error + * [HIVE-8196] - Joining on partition columns with fetch column stats enabled results it very small CE which negatively affects query performance + * [HIVE-8200] - Make beeline use the hive-jdbc standalone jar + * [HIVE-8201] - Remove hardwiring to HiveInputFormat in acid qfile tests + * [HIVE-8203] - ACID operations result in NPE when run through HS2 + * [HIVE-8205] - Using strings in group type fails in ParquetSerDe + * [HIVE-8210] - TezJobMonitor should print time spent in Application (RUNNING) + * [HIVE-8212] - Regression for hcat commandline alter view set tblproperties + * [HIVE-8217] - WebHCat 'jobs' 
endpoint fails if it runs into issues with any of the jobs + * [HIVE-8221] - authorize additional metadata read operations in metastore storage based authorization + * [HIVE-8225] - CBO trunk merge: union11 test fails due to incorrect plan + * [HIVE-8226] - Vectorize dynamic partitioning in VectorFileSinkOperator + * [HIVE-8227] - NPE w/ hive on tez when doing unions on empty tables + * [HIVE-8229] - Add multithreaded tests for the Hive Writable data types + * [HIVE-8231] - Error when insert into empty table with ACID + * [HIVE-8235] - Insert into partitioned bucketed sorted tables fails with "this file is already being created by" + * [HIVE-8236] - VectorHashKeyWrapper allocates too many zero sized arrays + * [HIVE-8239] - MSSQL upgrade schema scripts does not map Java long datatype columns correctly for transaction related tables + * [HIVE-8240] - VectorColumnAssignFactory throws "Incompatible Bytes vector column and primitive category VARCHAR" + * [HIVE-8241] - With vectorization enabled count(distinct)) fails with ClassCastException + * [HIVE-8246] - HiveServer2 in http-kerberos mode is restrictive on client usernames + * [HIVE-8248] - TestHCatLoader.testReadDataPrimitiveTypes() occasionally fails + * [HIVE-8250] - Truncating table doesnt invalidate stats + * [HIVE-8257] - Accumulo introduces old hadoop-client dependency + * [HIVE-8258] - Compactor cleaners can be starved on a busy table or partition. + * [HIVE-8260] - CBO : Query query has date_dim d1,date_dim d2 and date_dim d3 but the explain has d1, d1 and d1 + * [HIVE-8261] - CBO : Predicate pushdown is removed by Optiq + * [HIVE-8263] - CBO : TPC-DS Q64 is item is joined last with store_sales while it should be first as it is the most selective + * [HIVE-8269] - Revert HIVE-8200 (Make beeline use the hive-jdbc standalone jar) + * [HIVE-8270] - JDBC uber jar is missing some classes required in secure setup. + * [HIVE-8271] - Jackson incompatibility between hadoop-2.4 and hive-14 + * [HIVE-8272] - Query with particular decimal expression causes NPE during execution initialization + * [HIVE-8273] - Beeline doesn't print applicationID for submitted DAG + * [HIVE-8277] - IP address string in HS2, metastore have a "/" prefix + * [HIVE-8279] - sql std auth - additional test cases + * [HIVE-8280] - CBO : When filter is applied on dimension table PK/FK code path is not in effect. 
+ * [HIVE-8281] - NPE with dynamic partition pruning on Tez + * [HIVE-8283] - Missing break in FilterSelectivityEstimator#visitCall() + * [HIVE-8284] - Equality comparison is done between two floating point variables in HiveRelMdUniqueKeys#getUniqueKeys() + * [HIVE-8287] - Metadata action errors don't have information about cause + * [HIVE-8290] - With DbTxnManager configured, all ORC tables forced to be transactional + * [HIVE-8291] - ACID : Reading from partitioned bucketed tables has high overhead, 50% of time is spent in OrcInputFormat.getReader + * [HIVE-8292] - Reading from partitioned bucketed tables has high overhead in MapOperator.cleanUpInputFileChangedOp + * [HIVE-8296] - Tez ReduceShuffle Vectorization needs 2 data buffers (key and value) for adding rows + * [HIVE-8298] - Incorrect results for n-way join when join expressions are not in same order across joins + * [HIVE-8299] - HiveServer2 in http-kerberos & doAs=true is failing with org.apache.hadoop.security.AccessControlException + * [HIVE-8304] - Tez Reduce-Side GROUP BY Vectorization doesn't copy NULL keys correctly + * [HIVE-8306] - Map join sizing done by auto.convert.join.noconditionaltask.size doesn't take into account Hash table overhead and results in OOM + * [HIVE-8310] - RetryingHMSHandler is not used when kerberos auth enabled + * [HIVE-8311] - Driver is encoding transaction information too late + * [HIVE-8313] - Optimize evaluation for ExprNodeConstantEvaluator and ExprNodeNullEvaluator + * [HIVE-8314] - Restore thrift string interning of HIVE-7975 + * [HIVE-8315] - CBO : Negate condition underestimates selectivity which results in an in-efficient plan + * [HIVE-8316] - CBO : cardinality estimation for filters is much lower than actual row count + * [HIVE-8318] - Null Scan optimizer throws exception when no partitions are selected + * [HIVE-8321] - Fix serialization of TypeInfo for qualified types + * [HIVE-8322] - VectorReduceSinkOperator: ClassCastException: ~StandardUnionObjectInspector$StandardUnion cannot be cast to ~IntWritable + * [HIVE-8324] - Shim KerberosName (causes build failure on hadoop-1) + * [HIVE-8328] - MapJoin implementation in Tez should not reload hashtables + * [HIVE-8332] - Reading an ACID table with vectorization on results in NPE + * [HIVE-8335] - TestHCatLoader/TestHCatStorer failures on pre-commit tests + * [HIVE-8336] - Update pom, now that Optiq is renamed to Calcite + * [HIVE-8340] - HiveServer2 service doesn't stop backend jvm process, which prevents follow-up service start. 
+ * [HIVE-8341] - Transaction information in config file can grow excessively large + * [HIVE-8344] - Hive on Tez sets mapreduce.framework.name to yarn-tez + * [HIVE-8348] - Fix Hive to match changes introduced by TEZ-1510 + * [HIVE-8349] - DISTRIBUTE BY should work with tez auto-parallelism enabled + * [HIVE-8354] - HIVE-7156 introduced required dependency on tez + * [HIVE-8361] - NPE in PTFOperator when there are empty partitions + * [HIVE-8363] - AccumuloStorageHandler compile failure hadoop-1 + * [HIVE-8364] - We're not waiting for all inputs in MapRecordProcessor on Tez + * [HIVE-8366] - CBO fails if there is a table sample in subquery + * [HIVE-8367] - delete writes records in wrong order in some cases + * [HIVE-8368] - compactor is improperly writing delete records in base file + * [HIVE-8372] - Potential NPE in Tez MergeFileRecordProcessor + * [HIVE-8378] - NPE in TezTask due to null counters + * [HIVE-8380] - NanoTime class serializes and deserializes Timestamp incorrectly + * [HIVE-8386] - HCAT api call is case sensitive on fields in struct column + * [HIVE-8387] - add retry logic to ZooKeeperStorage in WebHCat + * [HIVE-8389] - Fix CBO when indexes are used + * [HIVE-8390] - CBO produces annoying exception message and wraps exceptions too much + * [HIVE-8391] - Comparison between TIMESTAMP and Integer types goes to STRING as "common comparison denominator" instead of a numeric type + * [HIVE-8392] - HiveServer2 Operation.close fails on windows + * [HIVE-8393] - Handle SIGINT on Tez + * [HIVE-8394] - HIVE-7803 doesn't handle Pig MultiQuery, can cause data-loss. + * [HIVE-8399] - Build failure on trunk & 14 branch + * [HIVE-8400] - Fix building and packaging hwi war file + * [HIVE-8401] - OrcFileMergeOperator only closes last orc file it opened, which resulted in stale data in table directory + * [HIVE-8402] - Orc pushing SARGs into delta files causing ArrayOutOfBoundsExceptions + * [HIVE-8403] - Build broken by datanucleus.org being offline + * [HIVE-8404] - ColumnPruner doesn't prune columns from limit operator + * [HIVE-8407] - [CBO] Handle filters with non-boolean return type + * [HIVE-8408] - hcat cli throws NPE when authorizer using new api is enabled + * [HIVE-8409] - SMB joins fail intermittently on tez + * [HIVE-8411] - Support partial partition spec for certain ALTER PARTITION statements + * [HIVE-8413] - [CBO] Handle ill-formed queries which have distinct, having in incorrect context + * [HIVE-8415] - Vectorized comparison of timestamp and integer needs to treat integer as seconds since epoch + * [HIVE-8417] - round(decimal, negative) errors out/wrong results with reduce side vectorization + * [HIVE-8421] - [CBO] Use OptiqSemanticException in error conditions + * [HIVE-8427] - Hive Streaming : secure streaming hangs leading to time outs. + * [HIVE-8429] - Add records in/out counters + * [HIVE-8433] - CBO loses a column during AST conversion + * [HIVE-8434] - Vectorization logic using wrong values for DATE and TIMESTAMP partitioning columns in vectorized row batches... + * [HIVE-8442] - Revert HIVE-8403 + * [HIVE-8444] - update pom to junit 4.11 + * [HIVE-8445] - TestColumnAccess, TestReadEntityDirect use same table names + * [HIVE-8452] - Cleanup handling of resource configuration for tez + * [HIVE-8460] - ORC SARG literal creation for double from float may lead to wrong evaluation of SARG + * [HIVE-8461] - Make Vectorized Decimal query results match Non-Vectorized query results with respect to trailing zeroes... 
.0000 + * [HIVE-8462] - CBO duplicates columns + * [HIVE-8464] - Vectorized reducer nested group by query returns wrong results + * [HIVE-8474] - Vectorized reads of transactional tables fail when not all columns are selected + * [HIVE-8475] - add test case for use of index from not-current database + * [HIVE-8476] - JavaDoc updates to HiveEndPoint.newConnection() for secure streaming with Kerberos + * [HIVE-8478] - Vectorized Reduce-Side Group By doesn't handle Decimal type correctly + * [HIVE-8479] - Tez sessions cannot change queues once assigned to one within a CLI session + * [HIVE-8484] - HCatalog throws an exception if Pig job is of type 'fetch' + * [HIVE-8489] - Add sanity check to dynamic partition pruning + * [HIVE-8495] - Add progress bar for Hive on Tez queries + * [HIVE-8497] - StatsNoJobTask doesn't close RecordReader, FSDataInputStream of which keeps open to prevent stale data clean + * [HIVE-8498] - Insert into table misses some rows when vectorization is enabled + * [HIVE-8510] - HIVE-8462 didn't update tez test output + * [HIVE-8511] - fix build failure: cbo_correctness on tez + * [HIVE-8514] - TestCliDriver.testCliDriver_index_in_db fails in trunk + * [HIVE-8517] - When joining on partition column NDV gets overridden by StatsUtils.getColStatisticsFromExpression + * [HIVE-8524] - When table is renamed stats are lost as changes are not propagated to metastore tables TAB_COL_STATS and PART_COL_STATS + * [HIVE-8526] - Hive : CBO incorrect join order in TPC-DS Q45 as self join selectivity has incorrect CE + * [HIVE-8534] - sql std auth : update configuration whitelist for 0.14 + * [HIVE-8543] - Compactions fail on metastore using postgres + * [HIVE-8546] - Handle "add archive scripts.tar.gz" in Tez + * [HIVE-8547] - CBO and/or constant propagation breaks partition_varchar2 test + * [HIVE-8550] - Hive cannot load data into partitioned table with Unicode key + * [HIVE-8551] - NPE in FunctionRegistry (affects CBO in negative tests) + * [HIVE-8555] - Too many casts results in loss of original string representation for constant + * [HIVE-8557] - automatically setup ZooKeeperTokenStore to use kerberos authentication when kerberos is enabled + * [HIVE-8558] - CBO: enable n-way joins after CBO join reordering + * [HIVE-8560] - SerDes that do not inherit AbstractSerDe do not get table properties during initialize() + * [HIVE-8562] - ResultSet.isClosed sometimes doesn't work with mysql + * [HIVE-8563] - Running annotate_stats_join_pkfk.q in TestMiniTezCliDriver is causing NPE + * [HIVE-8566] - Vectorized queries output wrong timestamps + * [HIVE-8567] - Vectorized queries output extra stuff for Binary columns + * [HIVE-8575] - CBO: decimal_udf is broken by recent changes (and other tests have type changes) + * [HIVE-8576] - Guaranteed NPE in StatsRulesProcFactory + * [HIVE-8577] - Cannot deserialize Avro schema with a map with null values + * [HIVE-8579] - Guaranteed NPE in DDLSemanticAnalyzer + * [HIVE-8586] - Record counters aren't updated correctly for vectorized queries + * [HIVE-8587] - Vectorized Extract operator needs to update the Vectorization Context column map + * [HIVE-8588] - sqoop REST endpoint fails to send appropriate JDBC driver to the cluster + * [HIVE-8596] - HiveServer2 dynamic service discovery: ZK throws too many connections error + * [HIVE-8603] - auto_sortmerge_join_5 is getting stuck on tez + * [HIVE-8604] - Re-enable auto_sortmerge_join_5 on tez + * [HIVE-8605] - HIVE-5799 breaks backward compatibility for time values in config + * [HIVE-8611] - grant/revoke 
syntax should support additional objects for authorization plugins + * [HIVE-8612] - Support metadata result filter hooks + * [HIVE-8614] - Upgrade hive to use tez version 0.5.2-SNAPSHOT + * [HIVE-8615] - beeline csv,tsv outputformat needs backward compatibility mode + * [HIVE-8619] - CBO causes some more type problems + * [HIVE-8620] - CBO: HIVE-8433 RowResolver check is too stringent + * [HIVE-8624] - Record counters don't work with Tez container reuse + * [HIVE-8625] - Some union queries result in plans with many unions with CBO on + * [HIVE-8628] - NPE in case of shuffle join in tez + * [HIVE-8629] - Streaming / ACID : hive cli session creation takes too long and times out if execution engine is tez + * [HIVE-8631] - Compressed transaction list cannot be parsed in job.xml + * [HIVE-8632] - VectorKeyHashWrapper::duplicateTo allocates too many zero sized arrays + * [HIVE-8634] - HiveServer2 fair scheduler queue mapping doesn't handle the secondary groups rules correctly + * [HIVE-8635] - CBO: ambiguous_col negative test no longer fails + * [HIVE-8641] - Disable skew joins in tez. + * [HIVE-8643] - DDL operations via WebHCat with doAs parameter in secure cluster fail + * [HIVE-8646] - Hive class loading failure when executing Hive action via oozie workflows + * [HIVE-8647] - HIVE-8186 causes addition of same child operator multiple times + * [HIVE-8660] - sql std auth: property missing from whitelist - hive.exec.dynamic.partition.mode + * [HIVE-8663] - Fetching Vectorization scratch column map in Reduce-Side stop working + * [HIVE-8664] - Use Apache Curator in JDBC Driver and HiveServer2 for better reliability + * [HIVE-8665] - Fix misc unit tests on Windows + * [HIVE-8668] - mssql sql script has carriage returns + * [HIVE-8671] - Overflow in estimate row count and data size with fetch column stats + * [HIVE-8675] - Increase thrift server protocol test coverage + * [HIVE-8677] - TPC-DS Q51 : fails with "init not supported" exception in GenericUDAFStreamingEvaluator.init + * [HIVE-8683] - User name and group name cannot be the same when grant role + * [HIVE-8685] - DDL operations in WebHCat set proxy user to "null" in unsecure mode + * [HIVE-8687] - Support Avro through HCatalog + * [HIVE-8688] - serialized plan OutputStream is not being closed + * [HIVE-8689] - handle overflows in statistics better + * [HIVE-8697] - Vectorized round(decimal, negative) produces wrong results + * [HIVE-8698] - default log4j.properties not included in jar files anymore + * [HIVE-8703] - More Windows unit test fixes + * [HIVE-8704] - HivePassThroughOutputFormat cannot proxy more than one kind of OF (in one process) + * [HIVE-8705] - Support using pre-authenticated subject in kerberized HiveServer2 HTTP mode + * [HIVE-8711] - DB deadlocks not handled in TxnHandler for Postgres, Oracle, and SQLServer + * [HIVE-8713] - Unit test TestParquetTimestampUtils.testTimezone failing + * [HIVE-8714] - getDatabase reports direct SQL error when database is missing + * [HIVE-8715] - Hive 14 upgrade scripts can fail for statistics if database was created using auto-create + * [HIVE-8720] - Update orc_merge tests to make it consistent across OS'es + * [HIVE-8723] - Set reasonable connection timeout for CuratorFramework ZooKeeper clients in Hive + * [HIVE-8724] - Right outer join produces incorrect result on Tez + * [HIVE-8727] - Dag summary has incorrect row counts and duration per vertex + * [HIVE-8732] - ORC string statistics are not merged correctly + * [HIVE-8733] - HiveServer2 dynamic service discovery not picking 
correct IP address when hive.server2.thrift.bind.host is not set + * [HIVE-8735] - statistics update can fail due to long paths + * [HIVE-8736] - add ordering to cbo_correctness to make result consistent + * [HIVE-8737] - setEnv is not portable, which fails TestCliDriverMethods#testprocessInitFiles on Windows + * [HIVE-8740] - Sorted dynamic partition does not work correctly with constant folding + * [HIVE-8745] - Joins on decimal keys return different results whether they are run as reduce join or map join + * [HIVE-8747] - Estimate number of rows for table with 0 rows overflows resulting in an inefficient plan + * [HIVE-8752] - Disjunction cardinality estimation has selectivity of 1 + * [HIVE-8754] - Sqoop job submission via WebHCat doesn't properly localize required jdbc jars in secure cluster + * [HIVE-8759] - HiveServer2 dynamic service discovery should add hostname instead of ipaddress to ZooKeeper + * [HIVE-8764] - Windows: HiveServer2 TCP SSL cannot recognize localhost + * [HIVE-8766] - Hive RetryHMSHandler should be retrying the metastore operation in case of NucleusException + * [HIVE-8771] - Abstract merge file operator does not move/rename incompatible files correctly + * [HIVE-8772] - zookeeper info logs are always printed from beeline with service discovery mode + * [HIVE-8778] - ORC split elimination can cause NPE when column statistics is null + * [HIVE-8781] - Nullsafe joins are busted on Tez + * [HIVE-8782] - HBase handler doesn't compile with hadoop-1 + * [HIVE-8785] - HiveServer2 LogDivertAppender should be more selective for beeline getLogs + * [HIVE-8794] - Hive on Tez leaks AMs when killed before first dag is run + * [HIVE-8798] - Some Oracle deadlocks not being caught in TxnHandler + * [HIVE-8799] - boatload of missing apache headers + + + + +** Improvement + * [HIVE-538] - make hive_jdbc.jar self-containing + * [HIVE-860] - Persistent distributed cache + * [HIVE-2365] - SQL support for bulk load into HBase + * [HIVE-3005] - Skip execution phase for queries that contain "LIMIT 0" clause + * [HIVE-3006] - Skip execution of queries with always false WHERE clauses + * [HIVE-3595] - Hive should adapt new FsShell commands since Hadoop 2 has changed FsShell argument structures + * [HIVE-3635] - allow 't', 'T', '1', 'f', 'F', and '0' to be allowable true/false values for the boolean hive type + * [HIVE-3684] - Add support for filter pushdown for composite keys + * [HIVE-3907] - Hive should support adding multiple resources at once + * [HIVE-4867] - Deduplicate columns appearing in both the key list and value list of ReduceSinkOperator + * [HIVE-4997] - HCatalog doesn't allow multiple input tables + * [HIVE-5072] - [WebHCat]Enable directly invoke Sqoop job through Templeton + * [HIVE-5160] - HS2 should support .hiverc + * [HIVE-5298] - AvroSerde performance problem caused by HIVE-3833 + * [HIVE-5370] - format_number udf should take user specified format as argument + * [HIVE-5408] - Method matching needs to be improved in NumericOpMethodResolver to support decimal type with parameters + * [HIVE-5447] - HiveServer2 should allow secure impersonation over LDAP or other non-kerberos connection + * [HIVE-5652] - Improve JavaDoc of UDF class + * [HIVE-5771] - Constant propagation optimizer for Hive + * [HIVE-5799] - session/operation timeout for hiveserver2 + * [HIVE-5821] - Evaluate the usefulness of UNKNOWN state in HiveServer2 state transitions and get rid of it if not required + * [HIVE-5871] - Use multiple-characters as field delimiter + * [HIVE-5961] - Add explain authorize 
for checking privileges + * [HIVE-6024] - Load data local inpath unnecessarily creates a copy task + * [HIVE-6037] - Synchronize HiveConf with hive-default.xml.template and support show conf + * [HIVE-6089] - Add metrics to HiveServer2 + * [HIVE-6132] - Support hbase filters for Hive HBase Integration + * [HIVE-6147] - Support avro data stored in HBase columns + * [HIVE-6304] - Update HCatReader/Writer docs to reflect recent changes + * [HIVE-6325] - Enable using multiple concurrent sessions in tez + * [HIVE-6410] - Allow output serializations separators to be set for HDFS path as well. + * [HIVE-6411] - Support more generic way of using composite key for HBaseHandler + * [HIVE-6430] - MapJoin hash table has large memory overhead + * [HIVE-6438] - Sort query result for test, removing order by clause + * [HIVE-6473] - Allow writing HFiles via HBaseStorageHandler table + * [HIVE-6510] - Clean up math based UDFs + * [HIVE-6561] - Beeline should accept -i option to Initializing a SQL file + * [HIVE-6584] - Add HiveHBaseTableSnapshotInputFormat + * [HIVE-6593] - Create a maven assembly for hive-jdbc + * [HIVE-6657] - Add test coverage for Kerberos authentication implementation using Hadoop's miniKdc + * [HIVE-6677] - HBaseSerDe needs to be refactored + * [HIVE-6691] - support decimals for optimized hashmap keys in MapJoin + * [HIVE-6694] - Beeline should provide a way to execute shell command as Hive CLI does + * [HIVE-6799] - HiveServer2 needs to map kerberos name to local name before proxy check + * [HIVE-6899] - Add an ability to specify the type of execution to use (async/sync execution) on JDBC client + * [HIVE-6920] - Parquet Serde Simplification + * [HIVE-6923] - Use slf4j For Logging Everywhere + * [HIVE-6924] - MapJoinKeyBytes::hashCode() should use Murmur hash + * [HIVE-6938] - Add Support for Parquet Column Rename + * [HIVE-6973] - HiveServer2 should support a non-kerberos user authentication on a secure cluster. + * [HIVE-6999] - Add streaming mode to PTFs + * [HIVE-7000] - Several issues with javadoc generation + * [HIVE-7022] - Replace BinaryWritable with BytesWritable in Parquet serde + * [HIVE-7026] - Support newly added role related APIs for v1 authorizer + * [HIVE-7048] - CompositeKeyHBaseFactory should not use FamilyFilter + * [HIVE-7100] - Users of hive should be able to specify skipTrash when dropping tables. + * [HIVE-7127] - Handover more details on exception in hiveserver2 + * [HIVE-7136] - Allow Hive to read hive scripts from any of the supported file systems in hadoop eco-system + * [HIVE-7137] - Add progressable to writer interfaces so they could report progress while different operations are in progress + * [HIVE-7138] - add row index dump capability to ORC file dump + * [HIVE-7140] - Bump default hive.metastore.client.socket.timeout to 5 minutes + * [HIVE-7142] - Hive multi serialization encoding support + * [HIVE-7168] - Don't require to name all columns in analyze statements if stats collection is for all columns + * [HIVE-7196] - Configure session by single open session call + * [HIVE-7208] - move SearchArgument interface into serde package + * [HIVE-7211] - Throws exception if the name of conf var starts with "hive." 
does not exists in HiveConf + * [HIVE-7219] - Improve performance of serialization utils in ORC + * [HIVE-7222] - Support timestamp column statistics in ORC and extend PPD for timestamp + * [HIVE-7223] - Support generic PartitionSpecs in Metastore partition-functions + * [HIVE-7230] - Add Eclipse formatter file for Hive coding conventions + * [HIVE-7231] - Improve ORC padding + * [HIVE-7243] - Print padding information in ORC file dump + * [HIVE-7250] - Adaptive compression buffer size for wide tables in ORC + * [HIVE-7361] - using authorization api for RESET, DFS, ADD, DELETE, COMPILE commands + * [HIVE-7386] - PTest support non-spot instances and higher cpu masters + * [HIVE-7432] - Remove deprecated Avro's Schema.parse usages + * [HIVE-7445] - Improve LOGS for Hive when a query is not able to acquire locks + * [HIVE-7495] - Print dictionary size in orc file dump + * [HIVE-7519] - Refactor QTestUtil to remove its duplication with QFileClient for qtest setup and teardown + * [HIVE-7532] - allow disabling direct sql per query with external metastore + * [HIVE-7543] - Cleanup of org.apache.hive.service.auth package + * [HIVE-7549] - Code cleanup of Task.java and HiveInputFormat.java + * [HIVE-7554] - Parquet Hive should resolve column names in case insensitive manner + * [HIVE-7562] - Cleanup ExecReducer + * [HIVE-7596] - Cleanup OperatorFactory, ReduceSinkOperator, and reportStats + * [HIVE-7609] - Collect partition level stats by default + * [HIVE-7615] - Beeline should have an option for user to see the query progress + * [HIVE-7616] - pre-size mapjoin hashtable based on statistics + * [HIVE-7689] - Fix wrong lower case table names in Postgres Metastore back end + * [HIVE-7697] - PlanUtils.getTableDesc uses printStackTrace and returns null + * [HIVE-7705] - there's a useless threadlocal in LBUtils that shows up in perf profiles + * [HIVE-7736] - improve the columns stats update speed for all the partitions of a table + * [HIVE-7737] - Hive logs full exception for table not found + * [HIVE-7757] - PTest2 separates test files with spaces while QTestGen uses commas + * [HIVE-7818] - Support boolean PPD for ORC + * [HIVE-7832] - Do ORC dictionary check at a finer level and preserve encoding across stripes + * [HIVE-7833] - Remove unwanted allocation in ORC RunLengthIntegerWriterV2 determine encoding function + * [HIVE-7876] - further improve the columns stats update speed for all the partitions of a table + * [HIVE-7885] - CLIServer.openSessionWithImpersonation logs as if it were openSession + * [HIVE-7912] - Don't add is not null filter for partitioning column + * [HIVE-7921] - Fix confusing dead assignment in return statement (JavaHiveVarcharObjectInspector) + * [HIVE-7923] - populate stats for test tables + * [HIVE-7925] - extend current partition status extrapolation to support all DBs + * [HIVE-7931] - Convert all tabs to spaces [code cleanup] + * [HIVE-7947] - Add message at the end of each testcase with timestamp in Webhcat system tests + * [HIVE-7975] - HS2 memory optimization: Internalizing instance fields of Thrift-generated metastore API classes + * [HIVE-8036] - PTest SSH Options + * [HIVE-8038] - Decouple ORC files split calculation logic from Filesystem's get file location implementation + * [HIVE-8042] - Optionally allow move tasks to run in parallel + * [HIVE-8096] - Fix a few small nits in TestExtendedAcls + * [HIVE-8100] - Add QTEST_LEAVE_FILES to QTestUtil + * [HIVE-8137] - Empty ORC file handling + * [HIVE-8245] - Collect table read entities at same time as view read 
entities + * [HIVE-8320] - Error in MetaException(message:Got exception: org.apache.thrift.transport.TTransportException java.net.SocketTimeoutException: Read timed out) + * [HIVE-8350] - Constant folding should happen before group-by optimization + * [HIVE-8358] - Constant folding should happen before PCR + * [HIVE-8385] - UNION Operator in Hive + * [HIVE-8428] - PCR doesnt remove filters involving casts + * [HIVE-8490] - Constant folding should happen before partition pruning + * [HIVE-8492] - Enhance Constant Folding to propagate constants for simple expressions + * [HIVE-8501] - Fix CBO to use indexes when GenericUDFBridge is applied + * [HIVE-8585] - Constant folding should happen before ppd + * [HIVE-8597] - SMB join small table side should use the same set of serialized payloads across tasks + * [HIVE-8598] - Push constant filters through joins + * [HIVE-8748] - jdbc uber jar is missing commons-logging + * [HIVE-8779] - Tez in-place progress UI can show wrong estimated time for sub-second queries + +** New Feature + * [HIVE-5317] - Implement insert, update, and delete in Hive with full ACID support + * [HIVE-5823] - Support for DECIMAL primitive type in AvroSerDe + * [HIVE-5908] - Use map-join hint to cache intermediate result + * [HIVE-6100] - Introduce basic set operations as UDFs + * [HIVE-6455] - Scalable dynamic partitioning and bucketing optimization + * [HIVE-6469] - skipTrash option in hive command line + * [HIVE-6806] - CREATE TABLE should support STORED AS AVRO + * [HIVE-7036] - get_json_object bug when extract list of list with index + * [HIVE-7054] - Support ELT UDF in vectorized mode + * [HIVE-7068] - Integrate AccumuloStorageHandler + * [HIVE-7090] - Support session-level temporary tables in Hive + * [HIVE-7122] - Storage format for create like table + * [HIVE-7158] - Use Tez auto-parallelism in Hive + * [HIVE-7203] - Optimize limit 0 + * [HIVE-7233] - File hive-hwi-0.13.1 not found on lib folder + * [HIVE-7255] - Allow partial partition spec in analyze command + * [HIVE-7299] - Enable metadata only optimization on Tez + * [HIVE-7341] - Support for Table replication across HCatalog instances + * [HIVE-7390] - Make quote character optional and configurable in BeeLine CSV/TSV output + * [HIVE-7416] - provide context information to authorization checkPrivileges api call + * [HIVE-7430] - Implement SMB join in tez + * [HIVE-7446] - Add support to ALTER TABLE .. ADD COLUMN to Avro backed tables + * [HIVE-7506] - MetadataUpdater: provide a mechanism to edit the statistics of a column in a table (or a partition of a table) + * [HIVE-7509] - Fast stripe level merging for ORC + * [HIVE-7547] - Add ipAddress and userName to ExecHook + * [HIVE-7587] - Fetch aggregated stats from MetaStore + * [HIVE-7604] - Add Metastore API to fetch one or more partition names + * [HIVE-7654] - A method to extrapolate columnStats for partitions of a table + * [HIVE-7826] - Dynamic partition pruning on Tez + * [HIVE-8267] - Exposing hbase cell latest timestamp through hbase columns mappings to hive columns. 
+ * [HIVE-8376] - Umbrella Jira for HiveServer2 dynamic service discovery + * [HIVE-8531] - Fold is not null filter if there are other comparison filter present on same column + * [HIVE-8690] - Move Avro dependency to 1.7.7 + + + + + + + + +** Task + * [HIVE-2974] - Add online docs for from_utc_timestamp() and to_utc_timestamp() + * [HIVE-5342] - Remove pre hadoop-0.20.0 related codes + * [HIVE-5976] - Decouple input formats from STORED as keywords + * [HIVE-6338] - Improve exception handling in createDefaultDb() in Metastore + * [HIVE-6432] - Remove deprecated methods in HCatalog + * [HIVE-6543] - TestEmbeddedThriftBinaryCLIService.testExecuteStatementAsync is failing sometimes + * [HIVE-6836] - Upgrade parquet to 1.4.0 + * [HIVE-6869] - Golden file updates for tez tests. + * [HIVE-6903] - Change default value of hive.metastore.execute.setugi to true + * [HIVE-6988] - Hive changes for tez-0.5.x compatibility + * [HIVE-7008] - Clean-up some old dead code + * [HIVE-7034] - Explain result of TezWork is not deterministic + * [HIVE-7095] - Fix test fails for both hadoop-1 and hadoop-2 + * [HIVE-7108] - Cleanup HBaseStorageHandler + * [HIVE-7126] - Cleanup build warnings while building hive projects + * [HIVE-7194] - authorization_ctas.q failing on trunk + * [HIVE-7206] - Duplicate declaration of build-helper-maven-plugin in root pom + * [HIVE-7240] - Add classifier for avro-mapred jar + * [HIVE-7289] - revert HIVE-6469 + * [HIVE-7301] - Restore constants moved to HiveConf by HIVE-7211 + * [HIVE-7364] - Trunk cannot be built on -Phadoop1 after HIVE-7144 + * [HIVE-7365] - Explain authorize for auth2 throws exception + * [HIVE-7381] - Class TezEdgeProperty missing license header + * [HIVE-7456] - HIVE-7361 missed updating authorization_fail_8 + * [HIVE-7496] - Exclude conf/hive-default.xml.template in version control and include it dist profile + * [HIVE-7497] - Fix some default values in HiveConf + * [HIVE-7498] - NPE on show grant for global privilege + * [HIVE-7524] - Enable auto conversion of SMBjoin in presence of constant propagate optimization + * [HIVE-7789] - Documentation for AccumuloStorageHandler + * [HIVE-7960] - Upgrade to Hadoop 2.5 + * [HIVE-8072] - TesParse_union is failing on trunk + * [HIVE-8224] - Support Char, Varchar in AvroSerDe + * [HIVE-8265] - Build failure on hadoop-1 + * [HIVE-8637] - In insert into X select from Y, table properties from X are clobbering those from Y + + +** Test + * [HIVE-7084] - TestWebHCatE2e is failing on trunk + * [HIVE-7085] - TestOrcHCatPigStorer.testWriteDecimal tests are failing on trunk + * [HIVE-7086] - TestHiveServer2.testConnection is failing on trunk + * [HIVE-7252] - TestTezTask.testSubmit fails in trunk + * [HIVE-7343] - Update committer list + * [HIVE-7684] - Avoid validating hook EnforceReadOnlyTable during test driver cleanup. 
+ * [HIVE-8035] - Add SORT_QUERY_RESULTS for test that doesn't guarantee order + * [HIVE-8070] - TestHWIServer failed due to wrong references to war and properties file + * [HIVE-8213] - TestHWISessionManager failed due to miss hadoop2 dependencies + * [HIVE-8360] - Add cross cluster support for webhcat E2E tests + * [HIVE-8420] - TestHadoop20SAuthBridge broken with hadoop-1 + * [HIVE-8553] - Add a hive smoke test for Apache Bigtop + * [HIVE-8618] - Add SORT_QUERY_RESULTS for test that doesn't guarantee order #3 + + +** Wish + * [HIVE-6241] - Remove direct reference of Hadoop23Shims inQTestUtil + + Release Notes - Hive - Version 0.13.0 ** Sub-task diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java index c6ee5c4..144afe3 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo; import java.io.DataInput; diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java index f7642cc..64eb18b 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.accumulo; import java.io.IOException; diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/LazyAccumuloRow.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/LazyAccumuloRow.java index 03cd250..3dc2d3c 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/LazyAccumuloRow.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/LazyAccumuloRow.java @@ -1,3 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hive.accumulo; import java.util.ArrayList; diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableInputFormat.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableInputFormat.java index 385b2f4..08d396e 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableInputFormat.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableInputFormat.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.mr; import java.io.IOException; diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java index 5edc9b5..534e77f 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate; import java.util.ArrayList; diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java index c303d49..ef459aa 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate; import java.io.IOException; diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/CompareOp.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/CompareOp.java index 0585333..59768e5 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/CompareOp.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/CompareOp.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.accumulo.predicate.compare; /** diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/DoubleCompare.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/DoubleCompare.java index 210ad72..53fd6e3 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/DoubleCompare.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/DoubleCompare.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; import java.math.BigDecimal; diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Equal.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Equal.java index 3a34f12..54d987a 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Equal.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Equal.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; /** diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThan.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThan.java index a47b2a3..f098114 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThan.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThan.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; /** diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThanOrEqual.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThanOrEqual.java index c502a45..1877da2 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThanOrEqual.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThanOrEqual.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; /** diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/IntCompare.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/IntCompare.java index d7de1ff..826355b 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/IntCompare.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/IntCompare.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.accumulo.predicate.compare; import java.nio.ByteBuffer; diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThan.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThan.java index 2933131..9e3c30f 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThan.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThan.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; /** diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThanOrEqual.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThanOrEqual.java index 86acb73..d21b6bf 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThanOrEqual.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThanOrEqual.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; /** diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Like.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Like.java index 612641d..f34d3cc 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Like.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Like.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; /** diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LongCompare.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LongCompare.java index b32874f..afd03d6 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LongCompare.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LongCompare.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; import java.nio.ByteBuffer; diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/NotEqual.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/NotEqual.java index 22b84ba..e1bd798 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/NotEqual.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/NotEqual.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.accumulo.predicate.compare; /** diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/PrimitiveComparison.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/PrimitiveComparison.java index 26e194f..b9a3e3e 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/PrimitiveComparison.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/PrimitiveComparison.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; /** diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/StringCompare.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/StringCompare.java index b71b8a8..0e038ad 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/StringCompare.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/StringCompare.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; import java.util.regex.Pattern; diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDe.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDe.java index 240521f..66ab01e 100644 --- a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDe.java +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDe.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.serde; import java.io.IOException; diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTableInputFormat.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTableInputFormat.java index e2ad8ef..e8beeb6 100644 --- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTableInputFormat.java +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTableInputFormat.java @@ -1,3 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hive.accumulo.mr; import static org.junit.Assert.assertArrayEquals; diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java index c0b14e1..97e14a2 100644 --- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.accumulo.predicate; import static org.junit.Assert.assertArrayEquals; diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestDoubleCompare.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestDoubleCompare.java index a6049c8..ca989a9 100644 --- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestDoubleCompare.java +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestDoubleCompare.java @@ -1,3 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hive.accumulo.predicate.compare; import static org.junit.Assert.assertEquals; diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestIntCompare.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestIntCompare.java index 9847a18..9fb6315 100644 --- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestIntCompare.java +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestIntCompare.java @@ -1,3 +1,19 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hive.accumulo.predicate.compare; import static org.junit.Assert.assertEquals; diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestLongComparison.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestLongComparison.java index 2abd41b..fe10c3f 100644 --- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestLongComparison.java +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestLongComparison.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; import static org.junit.Assert.assertEquals; diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestStringCompare.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestStringCompare.java index 08716bc..7b1960f 100644 --- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestStringCompare.java +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestStringCompare.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.accumulo.predicate.compare; import static org.junit.Assert.assertFalse; diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloSerDe.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloSerDe.java index bf3acd0..18b84e4 100644 --- a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloSerDe.java +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloSerDe.java @@ -1,3 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.accumulo.serde; import static org.junit.Assert.assertEquals; diff --git a/beeline/src/java/org/apache/hive/beeline/Commands.java b/beeline/src/java/org/apache/hive/beeline/Commands.java index 7e366dc..61af793 100644 --- a/beeline/src/java/org/apache/hive/beeline/Commands.java +++ b/beeline/src/java/org/apache/hive/beeline/Commands.java @@ -725,7 +725,7 @@ private boolean execute(String line, boolean call) { String extra = beeLine.getConsoleReader().readLine(prompt.toString()); if (!beeLine.isComment(extra)) { - line += " " + extra; + line += "\n" + extra; } } } catch (Exception e) { diff --git a/beeline/src/main/resources/beeline-log4j.properties b/beeline/src/main/resources/beeline-log4j.properties new file mode 100644 index 0000000..fe47d94 --- /dev/null +++ b/beeline/src/main/resources/beeline-log4j.properties @@ -0,0 +1,24 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +log4j.rootLogger=WARN, console + +######## console appender ######## +log4j.appender.console=org.apache.log4j.ConsoleAppender +log4j.appender.console.target=System.err +log4j.appender.console.layout=org.apache.log4j.PatternLayout +log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n +log4j.appender.console.encoding=UTF-8 diff --git a/bin/ext/beeline.sh b/bin/ext/beeline.sh index ddd5906..a957fe1 100644 --- a/bin/ext/beeline.sh +++ b/bin/ext/beeline.sh @@ -25,7 +25,8 @@ beeline () { superCsvJarPath=`ls ${HIVE_LIB}/super-csv-*.jar` jlineJarPath=`ls ${HIVE_LIB}/jline-*.jar` jdbcStandaloneJarPath=`ls ${HIVE_LIB}/hive-jdbc-*-standalone.jar` - export HADOOP_CLASSPATH=${beelineJarPath}:${superCsvJarPath}:${jlineJarPath}:${jdbcStandaloneJarPath} + export HADOOP_CLASSPATH=${HIVE_CONF_DIR}:${beelineJarPath}:${superCsvJarPath}:${jlineJarPath}:${jdbcStandaloneJarPath} + export HADOOP_CLIENT_OPTS="$HADOOP_CLIENT_OPTS -Dlog4j.configuration=beeline-log4j.properties " exec $HADOOP jar ${beelineJarPath} $CLASS $HIVE_OPTS "$@" } diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java index d7a9b0e..0ccaacb 100644 --- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java +++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java @@ -68,10 +68,7 @@ import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; -import org.apache.hadoop.hive.service.HiveClient; -import org.apache.hadoop.hive.service.HiveServerException; import org.apache.hadoop.io.IOUtils; -import org.apache.thrift.TException; import sun.misc.Signal; import sun.misc.SignalHandler; @@ -150,50 +147,7 @@ public int processCmd(String cmd) { stringifyException(e)); ret = 
1; } - } else if (ss.isRemoteMode()) { // remote mode -- connecting to remote hive server - HiveClient client = ss.getClient(); - PrintStream out = ss.out; - PrintStream err = ss.err; - - try { - client.execute(cmd_trimmed); - List results; - do { - results = client.fetchN(LINES_TO_FETCH); - for (String line : results) { - out.println(line); - } - } while (results.size() == LINES_TO_FETCH); - } catch (HiveServerException e) { - ret = e.getErrorCode(); - if (ret != 0) { // OK if ret == 0 -- reached the EOF - String errMsg = e.getMessage(); - if (errMsg == null) { - errMsg = e.toString(); - } - ret = e.getErrorCode(); - err.println("[Hive Error]: " + errMsg); - } - } catch (TException e) { - String errMsg = e.getMessage(); - if (errMsg == null) { - errMsg = e.toString(); - } - ret = -10002; - err.println("[Thrift Error]: " + errMsg); - } finally { - try { - client.clean(); - } catch (TException e) { - String errMsg = e.getMessage(); - if (errMsg == null) { - errMsg = e.toString(); - } - err.println("[Thrift Error]: Hive server is not cleaned due to thrift exception: " - + errMsg); - } - } - } else { // local mode + } else { // local mode try { CommandProcessor proc = CommandProcessorFactory.get(tokens, (HiveConf) conf); ret = processLocalCmd(cmd, proc, ss); @@ -695,31 +649,6 @@ public int run(String[] args) throws Exception { private int executeDriver(CliSessionState ss, HiveConf conf, OptionsProcessor oproc) throws Exception { - // connect to Hive Server - if (ss.getHost() != null) { - ss.connect(); - if (ss.isRemoteMode()) { - prompt = "[" + ss.host + ':' + ss.port + "] " + prompt; - char[] spaces = new char[prompt.length()]; - Arrays.fill(spaces, ' '); - prompt2 = new String(spaces); - } - } - - // CLI remote mode is a thin client: only load auxJars in local mode - if (!ss.isRemoteMode()) { - // hadoop-20 and above - we need to augment classpath using hiveconf - // components - // see also: code in ExecDriver.java - ClassLoader loader = conf.getClassLoader(); - String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS); - if (StringUtils.isNotBlank(auxJars)) { - loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ",")); - } - conf.setClassLoader(loader); - Thread.currentThread().setContextClassLoader(loader); - } - CliDriver cli = new CliDriver(); cli.setHiveVariables(oproc.getHiveVariables()); diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliSessionState.java b/cli/src/java/org/apache/hadoop/hive/cli/CliSessionState.java index f9d3beb..bd13960 100644 --- a/cli/src/java/org/apache/hadoop/hive/cli/CliSessionState.java +++ b/cli/src/java/org/apache/hadoop/hive/cli/CliSessionState.java @@ -25,13 +25,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.service.HiveClient; -import org.apache.thrift.TException; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportException; /** * SessionState for hive cli. 
@@ -63,66 +56,16 @@ */ public List initFiles = new ArrayList(); - /** - * host name and port number of remote Hive server - */ - protected String host; - protected int port; - - private boolean remoteMode; - - private TTransport transport; - private HiveClient client; - public CliSessionState(HiveConf conf) { super(conf); - remoteMode = false; - } - - /** - * Connect to Hive Server - */ - public void connect() throws TTransportException { - transport = new TSocket(host, port); - TProtocol protocol = new TBinaryProtocol(transport); - client = new HiveClient(protocol); - transport.open(); - remoteMode = true; - } - - public void setHost(String host) { - this.host = host; - } - - public String getHost() { - return host; - } - - public int getPort() { - return port; } @Override public void close() { try { super.close(); - if (remoteMode) { - client.clean(); - transport.close(); - } } catch (IOException ioe) { ioe.printStackTrace(); - } catch (TException e) { - e.printStackTrace(); - } + } } - - public boolean isRemoteMode() { - return remoteMode; - } - - public HiveClient getClient() { - return client; - } - } diff --git a/cli/src/java/org/apache/hadoop/hive/cli/OptionsProcessor.java b/cli/src/java/org/apache/hadoop/hive/cli/OptionsProcessor.java index 865848a..65725b9 100644 --- a/cli/src/java/org/apache/hadoop/hive/cli/OptionsProcessor.java +++ b/cli/src/java/org/apache/hadoop/hive/cli/OptionsProcessor.java @@ -83,20 +83,6 @@ public OptionsProcessor() { .withDescription("Use value for given property") .create()); - // -h hostname/ippaddress - options.addOption(OptionBuilder - .hasArg() - .withArgName("hostname") - .withDescription("connecting to Hive Server on remote host") - .create('h')); - - // -p port - options.addOption(OptionBuilder - .hasArg() - .withArgName("port") - .withDescription("connecting to Hive Server on port number") - .create('p')); - // Substitution option -d, --define options.addOption(OptionBuilder .withValueSeparator() @@ -169,10 +155,6 @@ public boolean process_stage2(CliSessionState ss) { ss.setIsVerbose(commandLine.hasOption('v')); - ss.host = (String) commandLine.getOptionValue('h'); - - ss.port = Integer.parseInt((String) commandLine.getOptionValue('p', "10000")); - String[] initFiles = commandLine.getOptionValues('i'); if (null != initFiles) { ss.initFiles = Arrays.asList(initFiles); diff --git a/cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java b/cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java index 2126cf2..88a37d5 100644 --- a/cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java +++ b/cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java @@ -56,10 +56,7 @@ import org.apache.hadoop.hive.ql.CommandNeedRetryException; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; -import org.apache.hadoop.hive.service.HiveClient; -import org.apache.hadoop.hive.service.HiveServerException; import org.apache.hadoop.util.Shell; -import org.apache.thrift.TException; // Cannot call class TestCliDriver since that's the name of the generated @@ -231,66 +228,6 @@ public void testQuit() throws Exception { } - /** - * test remote execCommand - */ - public void testRemoteCall() throws Exception { - MyCliSessionState ss = new MyCliSessionState(new HiveConf(), - org.apache.hadoop.hive.cli.TestCliDriverMethods.MyCliSessionState.ClientResult.RETURN_OK); - ss.err = System.err; - ByteArrayOutputStream data = new ByteArrayOutputStream(); - ss.out = new PrintStream(data); - 
MyCliSessionState.start(ss); - - CliDriver cliDriver = new CliDriver(); - cliDriver.processCmd("remote command"); - assertTrue(data.toString().contains("test result")); - - } - - /** - * test remote Exception - */ - public void testServerException() throws Exception { - MyCliSessionState ss = new MyCliSessionState( - new HiveConf(), - org.apache.hadoop.hive.cli.TestCliDriverMethods.MyCliSessionState.ClientResult.RETURN_SERVER_EXCEPTION); - ByteArrayOutputStream data = new ByteArrayOutputStream(); - ss.err = new PrintStream(data); - ss.out = System.out; - MyCliSessionState.start(ss); - - CliDriver cliDriver = new CliDriver(); - cliDriver.processCmd("remote command"); - assertTrue(data.toString().contains("[Hive Error]: test HiveServerException")); - data.reset(); - - - } - - /** - * test remote Exception - */ - public void testServerTException() throws Exception { - MyCliSessionState ss = new MyCliSessionState( - new HiveConf(), - org.apache.hadoop.hive.cli.TestCliDriverMethods.MyCliSessionState.ClientResult.RETURN_T_EXCEPTION); - ByteArrayOutputStream data = new ByteArrayOutputStream(); - ss.err = new PrintStream(data); - ss.out = System.out; - MyCliSessionState.start(ss); - - CliDriver cliDriver = new CliDriver(); - cliDriver.processCmd("remote command"); - assertTrue(data.toString().contains("[Thrift Error]: test TException")); - assertTrue(data.toString().contains( - "[Thrift Error]: Hive server is not cleaned due to thrift exception: test TException")); - - } - - /** - * test remote Exception - */ public void testProcessSelectDatabase() throws Exception { CliSessionState sessinState = new CliSessionState(new HiveConf()); CliSessionState.start(sessinState); @@ -521,63 +458,4 @@ public int getStatus() { return status; } } - - private static class MyCliSessionState extends CliSessionState { - - public enum ClientResult { - RETURN_OK, RETURN_SERVER_EXCEPTION, RETURN_T_EXCEPTION - }; - - private final ClientResult result; - - public MyCliSessionState(HiveConf conf, ClientResult result) { - super(conf); - this.result = result; - } - - @Override - public boolean isRemoteMode() { - return true; - } - - @Override - public HiveClient getClient() { - - HiveClient result = mock(HiveClient.class); - if (ClientResult.RETURN_OK.equals(this.result)) { - List fetchResult = new ArrayList(1); - fetchResult.add("test result"); - try { - when(result.fetchN(anyInt())).thenReturn(fetchResult); - } catch (HiveServerException e) { - } catch (Exception e) { - } - } else if (ClientResult.RETURN_SERVER_EXCEPTION.equals(this.result)) { - HiveServerException exception = new HiveServerException("test HiveServerException", 10, - "sql state"); - try { - when(result.fetchN(anyInt())).thenThrow(exception); - - when(result.fetchN(anyInt())).thenThrow(exception); - } catch (TException e) { - ; - } - return result; - } else if (ClientResult.RETURN_T_EXCEPTION.equals(this.result)) { - TException exception = new TException("test TException"); - try { - // org.mockito.Mockito. 
- doThrow(exception).when(result).clean(); - when(result.fetchN(anyInt())).thenThrow(exception); - } catch (TException e) { - e.printStackTrace(); - } - return result; - } - return result; - } - - } - - } diff --git a/cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java b/cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java index 63b9371..d9718c6 100644 --- a/cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java +++ b/cli/src/test/org/apache/hadoop/hive/cli/TestCliSessionState.java @@ -18,21 +18,10 @@ package org.apache.hadoop.hive.cli; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.InetAddress; -import java.net.ServerSocket; -import java.net.Socket; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.session.SessionState; -import org.junit.AfterClass; -import org.junit.BeforeClass; import org.junit.Test; /** @@ -40,46 +29,6 @@ */ public class TestCliSessionState { - private static TCPServer server; - private static String command = null; - - @BeforeClass - public static void start() throws Exception { - // start fake server - server = new TCPServer(); - Thread thread = new Thread(server); - thread.start(); - // wait for start server; - while (server.getPort() == 0) { - Thread.sleep(20); - } - } - - @AfterClass - public static void stop() throws IOException { - server.stop(); - } - - /** - * test CliSessionState for remote - */ - @Test - public void testConnect() throws Exception { - CliSessionState sessionState = new CliSessionState(new HiveConf()); - sessionState.port = server.getPort(); - sessionState.setHost(InetAddress.getLocalHost().getHostName()); - // check connect - sessionState.connect(); - assertTrue(sessionState.isRemoteMode()); - assertEquals(server.getPort(), sessionState.getPort()); - assertEquals(InetAddress.getLocalHost().getHostName(), sessionState.getHost()); - assertNotNull(sessionState.getClient()); - sessionState.close(); - // close should send command clean - assertEquals(command, "clean"); - - } - /** * test default db name */ @@ -89,44 +38,4 @@ public void testgetDbName() throws Exception { assertEquals(MetaStoreUtils.DEFAULT_DATABASE_NAME, SessionState.get().getCurrentDatabase()); } - - /** - * fake hive server - */ - private static class TCPServer implements Runnable { - private int port = 0; - private boolean stop = false; - private ServerSocket welcomeSocket; - - public void run() { - try { - - welcomeSocket = new ServerSocket(0); - port = welcomeSocket.getLocalPort(); - while (!stop) { - byte[] buffer = new byte[512]; - Socket connectionSocket = welcomeSocket.accept(); - InputStream input = connectionSocket.getInputStream(); - OutputStream output = connectionSocket.getOutputStream(); - int read = input.read(buffer); - // command without service bytes - command = new String(buffer, 8, read - 13); - // send derived - output.write(buffer, 0, read); - } - } catch (IOException e) { - ; - } - - } - - public int getPort() { - return port; - } - - public void stop() throws IOException { - stop = true; - welcomeSocket.close(); - } - } } diff --git a/cli/src/test/org/apache/hadoop/hive/cli/TestOptionsProcessor.java b/cli/src/test/org/apache/hadoop/hive/cli/TestOptionsProcessor.java index 577a8dd..9d0399a 100644 --- 
a/cli/src/test/org/apache/hadoop/hive/cli/TestOptionsProcessor.java +++ b/cli/src/test/org/apache/hadoop/hive/cli/TestOptionsProcessor.java @@ -41,8 +41,7 @@ public void testOptionsProcessor() { System.clearProperty("hivevar"); assertNull(System.getProperty("_A")); String[] args = { "-hiveconf", "_A=B", "-define", "C=D", "-hivevar", "X=Y", - "-S", "true", "-database", "testDb", "-e", "execString", "-v", "true", - "-h", "yahoo.host", "-p", "3000"}; + "-S", "true", "-database", "testDb", "-e", "execString", "-v", "true"}; // stage 1 assertTrue(processor.process_stage1(args)); @@ -55,8 +54,6 @@ public void testOptionsProcessor() { processor.process_stage2(sessionState); assertEquals("testDb", sessionState.database); assertEquals("execString", sessionState.execString); - assertEquals("yahoo.host", sessionState.host); - assertEquals(3000, sessionState.port); assertEquals(0, sessionState.initFiles.size()); assertTrue(sessionState.getIsVerbose()); sessionState.setConf(null); diff --git a/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java b/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java index bd3e997..d4cc32d 100644 --- a/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java +++ b/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java @@ -103,13 +103,6 @@ private short scale; /** - * This is the actual scale detected from the value passed to this Decimal128. - * The value is always equals or less than #scale. It is used to return the correct - * decimal string from {@link #getHiveDecimalString()}. - */ - private short actualScale; - - /** * -1 means negative, 0 means zero, 1 means positive. * * @serial @@ -134,7 +127,6 @@ public Decimal128() { this.unscaledValue = new UnsignedInt128(); this.scale = 0; this.signum = 0; - this.actualScale = 0; } /** @@ -147,7 +139,6 @@ public Decimal128(Decimal128 o) { this.unscaledValue = new UnsignedInt128(o.unscaledValue); this.scale = o.scale; this.signum = o.signum; - this.actualScale = o.actualScale; } /** @@ -187,7 +178,6 @@ public Decimal128(UnsignedInt128 unscaledVal, short scale, boolean negative) { checkScaleRange(scale); this.unscaledValue = new UnsignedInt128(unscaledVal); this.scale = scale; - this.actualScale = scale; if (unscaledValue.isZero()) { this.signum = 0; } else { @@ -274,7 +264,6 @@ public Decimal128 update(Decimal128 o) { this.unscaledValue.update(o.unscaledValue); this.scale = o.scale; this.signum = o.signum; - this.actualScale = o.actualScale; return this; } @@ -303,7 +292,7 @@ public Decimal128 update(long val) { /** * Update the value of this object with the given {@code long} with the given - * scale. + * scal. * * @param val * {@code long} value to be set to {@code Decimal128}. @@ -325,8 +314,6 @@ public Decimal128 update(long val, short scale) { if (scale != 0) { changeScaleDestructive(scale); } - // set actualScale to 0 because there is no fractional digits on integer values - this.actualScale = 0; return this; } @@ -354,11 +341,6 @@ public Decimal128 update(double val, short scale) { checkScaleRange(scale); this.scale = scale; - // Obtains the scale of the double value to keep a record of the original - // scale. This will be used to print the HiveDecimal string with the - // correct value scale. - this.actualScale = (short) BigDecimal.valueOf(val).scale(); - // Translate the double into sign, exponent and significand, according // to the formulae in JLS, Section 20.10.22. 
long valBits = Double.doubleToLongBits(val); @@ -382,10 +364,6 @@ public Decimal128 update(double val, short scale) { exponent++; } - // Calculate the real number of fractional digits from the double value - this.actualScale -= (exponent > 0) ? exponent : 0; - this.actualScale = (this.actualScale < 0) ? 0 : this.actualScale; - // so far same as java.math.BigDecimal, but the scaling below is // specific to ANSI SQL Numeric. @@ -448,7 +426,6 @@ public Decimal128 update(double val, short scale) { public Decimal128 update(IntBuffer buf, int precision) { int scaleAndSignum = buf.get(); this.scale = (short) (scaleAndSignum >> 16); - this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update(buf, precision); assert ((signum == 0) == unscaledValue.isZero()); @@ -465,7 +442,6 @@ public Decimal128 update(IntBuffer buf, int precision) { public Decimal128 update128(IntBuffer buf) { int scaleAndSignum = buf.get(); this.scale = (short) (scaleAndSignum >> 16); - this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update128(buf); assert ((signum == 0) == unscaledValue.isZero()); @@ -482,7 +458,6 @@ public Decimal128 update128(IntBuffer buf) { public Decimal128 update96(IntBuffer buf) { int scaleAndSignum = buf.get(); this.scale = (short) (scaleAndSignum >> 16); - this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update96(buf); assert ((signum == 0) == unscaledValue.isZero()); @@ -499,7 +474,6 @@ public Decimal128 update96(IntBuffer buf) { public Decimal128 update64(IntBuffer buf) { int scaleAndSignum = buf.get(); this.scale = (short) (scaleAndSignum >> 16); - this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update64(buf); assert ((signum == 0) == unscaledValue.isZero()); @@ -516,7 +490,6 @@ public Decimal128 update64(IntBuffer buf) { public Decimal128 update32(IntBuffer buf) { int scaleAndSignum = buf.get(); this.scale = (short) (scaleAndSignum >> 16); - this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update32(buf); assert ((signum == 0) == unscaledValue.isZero()); @@ -537,7 +510,6 @@ public Decimal128 update32(IntBuffer buf) { public Decimal128 update(int[] array, int offset, int precision) { int scaleAndSignum = array[offset]; this.scale = (short) (scaleAndSignum >> 16); - this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update(array, offset + 1, precision); return this; @@ -555,7 +527,6 @@ public Decimal128 update(int[] array, int offset, int precision) { public Decimal128 update128(int[] array, int offset) { int scaleAndSignum = array[offset]; this.scale = (short) (scaleAndSignum >> 16); - this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update128(array, offset + 1); return this; @@ -573,7 +544,6 @@ public Decimal128 update128(int[] array, int offset) { public Decimal128 update96(int[] array, int offset) { int scaleAndSignum = array[offset]; this.scale = (short) (scaleAndSignum >> 16); - this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update96(array, offset + 1); return this; @@ -591,7 +561,6 @@ public Decimal128 update96(int[] array, int offset) { public Decimal128 update64(int[] array, int offset) { int scaleAndSignum = array[offset]; this.scale = (short) (scaleAndSignum >> 16); - this.actualScale = this.scale; this.signum = (byte) 
(scaleAndSignum & 0xFF); this.unscaledValue.update64(array, offset + 1); return this; @@ -609,7 +578,6 @@ public Decimal128 update64(int[] array, int offset) { public Decimal128 update32(int[] array, int offset) { int scaleAndSignum = array[offset]; this.scale = (short) (scaleAndSignum >> 16); - this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update32(array, offset + 1); return this; @@ -632,6 +600,7 @@ public Decimal128 update(BigDecimal bigDecimal) { * @param scale */ public Decimal128 update(BigInteger bigInt, short scale) { + this.scale = scale; this.signum = (byte) bigInt.compareTo(BigInteger.ZERO); if (signum == 0) { update(0); @@ -640,9 +609,6 @@ public Decimal128 update(BigInteger bigInt, short scale) { } else { unscaledValue.update(bigInt); } - this.scale = scale; - this.actualScale = scale; - return this; } @@ -765,9 +731,6 @@ public Decimal128 update(char[] str, int offset, int length, short scale) { this.unscaledValue.addDestructive(accumulated); } - this.actualScale = (short) (fractionalDigits - exponent); - this.actualScale = (this.actualScale < 0) ? 0 : this.actualScale; - int scaleAdjust = scale - fractionalDigits + exponent; if (scaleAdjust > 0) { this.unscaledValue.scaleUpTenDestructive((short) scaleAdjust); @@ -961,7 +924,6 @@ public void changeScaleDestructive(short scale) { this.unscaledValue.scaleUpTenDestructive((short) -scaleDown); } this.scale = scale; - this.actualScale = scale; this.unscaledValue.throwIfExceedsTenToThirtyEight(); } @@ -1163,7 +1125,6 @@ public void multiplyDestructiveNativeDecimal128(Decimal128 right, short newScale if (this.signum == 0 || right.signum == 0) { this.zeroClear(); this.scale = newScale; - this.actualScale = newScale; return; } @@ -1193,7 +1154,6 @@ public void multiplyDestructiveNativeDecimal128(Decimal128 right, short newScale } this.scale = newScale; - this.actualScale = newScale; this.signum = (byte) (this.signum * right.signum); if (this.unscaledValue.isZero()) { this.signum = 0; // because of scaling down, this could happen @@ -1284,7 +1244,6 @@ public void divideDestructiveNativeDecimal128(Decimal128 right, short newScale, } if (this.signum == 0) { this.scale = newScale; - this.actualScale = newScale; remainder.update(this); return; } @@ -1312,7 +1271,6 @@ public void divideDestructiveNativeDecimal128(Decimal128 right, short newScale, } this.scale = newScale; - this.actualScale = newScale; this.signum = (byte) (this.unscaledValue.isZero() ? 0 : (this.signum * right.signum)); remainder.scale = scale; @@ -1773,13 +1731,17 @@ private static void checkScaleRange(short scale) { private int [] tmpArray = new int[2]; /** - * Returns the string representation of this value. It returns the original - * {@code actualScale} fractional part when this value was created. However, + * Returns the string representation of this value. It discards the trailing zeros + * in the fractional part to match the HiveDecimal's string representation. However, * don't use this string representation for the reconstruction of the object. 
* * @return string representation of this value */ public String getHiveDecimalString() { + if (this.signum == 0) { + return "0"; + } + StringBuilder buf = new StringBuilder(50); if (this.signum < 0) { buf.append('-'); @@ -1790,40 +1752,32 @@ public String getHiveDecimalString() { int trailingZeros = tmpArray[1]; int numIntegerDigits = unscaledLength - this.scale; if (numIntegerDigits > 0) { + // write out integer part first // then write out fractional part for (int i=0; i < numIntegerDigits; i++) { buf.append(unscaled[i]); } - if (this.actualScale > 0) { + if (this.scale > trailingZeros) { buf.append('.'); - - if (trailingZeros > this.actualScale) { - for (int i=0; i < (trailingZeros - this.scale); i++) { - buf.append("0"); - } - } - - for (int i = numIntegerDigits; i < (numIntegerDigits + this.actualScale); i++) { + for (int i = numIntegerDigits; i < (unscaledLength - trailingZeros); i++) { buf.append(unscaled[i]); } } } else { + // no integer part buf.append('0'); - if (this.actualScale > 0) { + if (this.scale > trailingZeros) { + // fractional part has, starting with zeros buf.append('.'); - - if (this.actualScale > trailingZeros) { - for (int i = unscaledLength; i < this.scale; ++i) { - buf.append('0'); - } + for (int i = unscaledLength; i < this.scale; ++i) { + buf.append('0'); } - - for (int i = 0; i < (numIntegerDigits + this.actualScale); i++) { + for (int i = 0; i < (unscaledLength - trailingZeros); i++) { buf.append(unscaled[i]); } } @@ -1882,10 +1836,9 @@ public String toFormalString() { @Override public String toString() { - return toFormalString() + "(Decimal128: scale=" + scale + ", actualScale=" - + this.actualScale + ", signum=" + signum + ", BigDecimal.toString=" - + toBigDecimal().toString() + ", unscaledValue=[" + unscaledValue.toString() - + "])"; + return toFormalString() + "(Decimal128: scale=" + scale + ", signum=" + + signum + ", BigDecimal.toString=" + toBigDecimal().toString() + + ", unscaledValue=[" + unscaledValue.toString() + "])"; } /** @@ -2003,7 +1956,6 @@ public Decimal128 updateVarianceDestructive( */ public Decimal128 fastUpdateFromInternalStorage(byte[] internalStorage, short scale) { this.scale = scale; - this.actualScale = scale; this.signum = this.unscaledValue.fastUpdateFromInternalStorage(internalStorage); return this; diff --git a/common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java b/common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java index 0bb84e1..c2bf6d7 100644 --- a/common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java +++ b/common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java @@ -30,6 +30,7 @@ public class HiveDecimal implements Comparable { public static final int MAX_PRECISION = 38; public static final int MAX_SCALE = 38; + /** * Default precision/scale when user doesn't specify in the column metadata, such as * decimal and decimal(8). 
@@ -112,7 +113,7 @@ public int compareTo(HiveDecimal dec) { @Override public int hashCode() { - return trim(bd).hashCode(); + return bd.hashCode(); } @Override @@ -168,7 +169,7 @@ public HiveDecimal subtract(HiveDecimal dec) { } public HiveDecimal multiply(HiveDecimal dec) { - return create(bd.multiply(dec.bd), true); + return create(bd.multiply(dec.bd), false); } public BigInteger unscaledValue() { @@ -201,7 +202,7 @@ public HiveDecimal remainder(HiveDecimal dec) { } public HiveDecimal divide(HiveDecimal dec) { - return create(trim(bd.divide(dec.bd, MAX_SCALE, RoundingMode.HALF_UP)), true); + return create(bd.divide(dec.bd, MAX_SCALE, RoundingMode.HALF_UP), true); } /** @@ -231,6 +232,8 @@ private static BigDecimal normalize(BigDecimal bd, boolean allowRounding) { return null; } + bd = trim(bd); + int intDigits = bd.precision() - bd.scale(); if (intDigits > MAX_PRECISION) { @@ -241,6 +244,8 @@ private static BigDecimal normalize(BigDecimal bd, boolean allowRounding) { if (bd.scale() > maxScale ) { if (allowRounding) { bd = bd.setScale(maxScale, RoundingMode.HALF_UP); + // Trimming is again necessary, because rounding may introduce new trailing 0's. + bd = trim(bd); } else { bd = null; } @@ -254,6 +259,8 @@ public static BigDecimal enforcePrecisionScale(BigDecimal bd, int maxPrecision, return null; } + bd = trim(bd); + if (bd.scale() > maxScale) { bd = bd.setScale(maxScale, RoundingMode.HALF_UP); } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 7a67a81..46b56bb 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -519,6 +519,11 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { "work for all queries on your datastore. If all SQL queries fail (for example, your\n" + "metastore is backed by MongoDB), you might want to disable this to save the\n" + "try-and-fall-back cost."), + METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE("hive.metastore.direct.sql.batch.size", 0, + "Batch size for partition and other object retrieval from the underlying DB in direct\n" + + "SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" + + "that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" + + "may impede performance. -1 means no batching, 0 means automatic batching."), METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true, "Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" + "modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" + @@ -565,6 +570,8 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", "", "List of comma separated keys occurring in table properties which will get inherited to newly created partitions. 
\n" + "* implies all the keys will get inherited."), + METASTORE_FILTER_HOOK("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl", + "Metastore hook class for filtering the metadata read results"), // Parameters for exporting metadata on table drop (requires the use of the) // org.apache.hadoop.hive.ql.parse.MetaDataExportListener preevent listener @@ -1487,6 +1494,9 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { "An example like \"select,drop\" will grant select and drop privilege to the owner\n" + "of the table. Note that the default gives the creator of a table no access to the\n" + "table (but see HIVE-8067)."), + HIVE_AUTHORIZATION_TASK_FACTORY("hive.security.authorization.task.factory", + "org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl", + "Authorization DDL task factory implementation"), // if this is not set default value is set during config initialization // Default value can't be set in this constructor as it would refer names in other ConfVars @@ -1539,12 +1549,13 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { // operation log configuration HIVE_SERVER2_LOGGING_OPERATION_ENABLED("hive.server2.logging.operation.enabled", true, - "When true, HS2 will save operation logs"), + "When true, HS2 will save operation logs and make them available for clients"), HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION("hive.server2.logging.operation.log.location", "${system:java.io.tmpdir}" + File.separator + "${system:user.name}" + File.separator + "operation_logs", "Top level directory where operation logs are stored if logging functionality is enabled"), - + HIVE_SERVER2_LOGGING_OPERATION_VERBOSE("hive.server2.logging.operation.verbose", false, + "When true, HS2 operation logs available for clients will be verbose"), // logging configuration HIVE_LOG4J_FILE("hive.log4j.file", "", "Hive log4j configuration file.\n" + @@ -1599,7 +1610,7 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { "table. From 0.12 onwards, they are displayed separately. This flag will let you\n" + "get old behavior, if desired. See, test-case in patch for HIVE-6689."), - HIVE_SSL_PROTOCOL_BLACKLIST("hive.ssl.protocol.blacklist", "SSLv2,SSLv2Hello,SSLv3", + HIVE_SSL_PROTOCOL_BLACKLIST("hive.ssl.protocol.blacklist", "SSLv2,SSLv3", "SSL Versions to disable for all Hive Servers"), // HiveServer2 specific configs diff --git a/common/src/java/org/apache/hive/common/util/Decimal128FastBuffer.java b/common/src/java/org/apache/hive/common/util/Decimal128FastBuffer.java index aeca82f..c0961fa 100644 --- a/common/src/java/org/apache/hive/common/util/Decimal128FastBuffer.java +++ b/common/src/java/org/apache/hive/common/util/Decimal128FastBuffer.java @@ -1,6 +1,21 @@ /** - * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. */ + package org.apache.hive.common.util; import java.nio.ByteBuffer; diff --git a/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java b/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java index 0786cca..46236a5 100644 --- a/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java +++ b/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java @@ -811,7 +811,7 @@ public void testToHiveDecimalString() { assertEquals("0.00923076923", d2.getHiveDecimalString()); Decimal128 d3 = new Decimal128("0.00923076000", (short) 15); - assertEquals("0.00923076000", d3.getHiveDecimalString()); + assertEquals("0.00923076", d3.getHiveDecimalString()); Decimal128 d4 = new Decimal128("4294967296.01", (short) 15); assertEquals("4294967296.01", d4.getHiveDecimalString()); @@ -849,37 +849,15 @@ public void testToHiveDecimalString() { d11.update(hd6.bigDecimalValue()); assertEquals(hd6.toString(), d11.getHiveDecimalString()); - // The trailing zeros from a double value are trimmed automatically - // by the double data type Decimal128 d12 = new Decimal128(27.000, (short)3); - HiveDecimal hd7 = HiveDecimal.create(new BigDecimal("27.0")); + HiveDecimal hd7 = HiveDecimal.create(new BigDecimal("27.000")); assertEquals(hd7.toString(), d12.getHiveDecimalString()); - assertEquals("27.0", d12.getHiveDecimalString()); + assertEquals("27", d12.getHiveDecimalString()); Decimal128 d13 = new Decimal128(1234123000, (short)3); HiveDecimal hd8 = HiveDecimal.create(new BigDecimal("1234123000")); assertEquals(hd8.toString(), d13.getHiveDecimalString()); assertEquals("1234123000", d13.getHiveDecimalString()); - - Decimal128 d14 = new Decimal128(1.33e4, (short)10); - HiveDecimal hd9 = HiveDecimal.create(new BigDecimal("1.33e4")); - assertEquals(hd9.toString(), d14.getHiveDecimalString()); - assertEquals("13300", d14.getHiveDecimalString()); - - Decimal128 d15 = new Decimal128(1.33e-4, (short)10); - HiveDecimal hd10 = HiveDecimal.create(new BigDecimal("1.33e-4")); - assertEquals(hd10.toString(), d15.getHiveDecimalString()); - assertEquals("0.000133", d15.getHiveDecimalString()); - - Decimal128 d16 = new Decimal128("1.33e4", (short)10); - HiveDecimal hd11 = HiveDecimal.create(new BigDecimal("1.33e4")); - assertEquals(hd11.toString(), d16.getHiveDecimalString()); - assertEquals("13300", d16.getHiveDecimalString()); - - Decimal128 d17 = new Decimal128("1.33e-4", (short)10); - HiveDecimal hd12 = HiveDecimal.create(new BigDecimal("1.33e-4")); - assertEquals(hd12.toString(), d17.getHiveDecimalString()); - assertEquals("0.000133", d17.getHiveDecimalString()); } @Test diff --git a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java index 959989a..ba5ef71 100644 --- a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java +++ b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java @@ -57,35 +57,27 @@ public void testPrecisionScaleEnforcement() { Assert.assertEquals("-1786135888657847525803324040144343378.1", dec.toString()); dec = HiveDecimal.create("005.34000"); - Assert.assertEquals(dec.precision(), 6); - Assert.assertEquals(dec.scale(), 5); + Assert.assertEquals(dec.precision(), 3); + Assert.assertEquals(dec.scale(), 2); dec = HiveDecimal.create("178613588865784752580332404014434337809799306448796128931113691624"); Assert.assertNull(dec); - // 
Leaving trailing zeros - Assert.assertEquals("0.0", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.0"), 2, 1).toString()); - Assert.assertEquals("0.00", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.00"), 3, 2).toString()); - Assert.assertEquals("0.0000", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.0000"), 10, 4).toString()); - Assert.assertEquals("100.00000", HiveDecimal.enforcePrecisionScale(new BigDecimal("100.00000"), 15, 5).toString()); - Assert.assertEquals("100.00", HiveDecimal.enforcePrecisionScale(new BigDecimal("100.00"), 15, 5).toString()); - - // Rounding numbers - Assert.assertEquals("0.01", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.012"), 3, 2).toString()); - Assert.assertEquals("0.02", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.015"), 3, 2).toString()); - Assert.assertEquals("0.01", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.0145"), 3, 2).toString()); - // Rounding numbers that increase int digits Assert.assertEquals("10", HiveDecimal.enforcePrecisionScale(new BigDecimal("9.5"), 2, 0).toString()); Assert.assertNull(HiveDecimal.enforcePrecisionScale(new BigDecimal("9.5"), 1, 0)); Assert.assertEquals("9", HiveDecimal.enforcePrecisionScale(new BigDecimal("9.4"), 1, 0).toString()); + } - // Integers with no scale values are not modified (zeros are not null) - Assert.assertEquals("0", HiveDecimal.enforcePrecisionScale(new BigDecimal("0"), 1, 0).toString()); - Assert.assertEquals("30", HiveDecimal.enforcePrecisionScale(new BigDecimal("30"), 2, 0).toString()); - Assert.assertEquals("5", HiveDecimal.enforcePrecisionScale(new BigDecimal("5"), 3, 2).toString()); + @Test + @Concurrent(count=4) + @Repeating(repetition=100) + public void testTrailingZeroRemovalAfterEnforcement() { + String decStr = "8.090000000000000000000000000000000000000123456"; + HiveDecimal dec = HiveDecimal.create(decStr); + Assert.assertEquals("8.09", dec.toString()); } @Test @@ -94,7 +86,7 @@ public void testPrecisionScaleEnforcement() { public void testMultiply() { HiveDecimal dec1 = HiveDecimal.create("0.00001786135888657847525803"); HiveDecimal dec2 = HiveDecimal.create("3.0000123456789"); - Assert.assertNotNull(dec1.multiply(dec2)); + Assert.assertNull(dec1.multiply(dec2)); dec1 = HiveDecimal.create("178613588865784752580323232232323444.4"); dec2 = HiveDecimal.create("178613588865784752580302323232.3"); @@ -106,11 +98,11 @@ public void testMultiply() { dec1 = HiveDecimal.create("3.140"); dec2 = HiveDecimal.create("1.00"); - Assert.assertEquals("3.14000", dec1.multiply(dec2).toString()); + Assert.assertEquals("3.14", dec1.multiply(dec2).toString()); dec1 = HiveDecimal.create("43.010"); dec2 = HiveDecimal.create("2"); - Assert.assertEquals("86.020", dec1.multiply(dec2).toString()); + Assert.assertEquals("86.02", dec1.multiply(dec2).toString()); } @Test @@ -125,7 +117,7 @@ public void testPow() { Assert.assertNull(dec1); dec1 = HiveDecimal.create("3.140"); - Assert.assertEquals("9.859600", dec1.pow(2).toString()); + Assert.assertEquals("9.8596", dec1.pow(2).toString()); } @Test @@ -155,7 +147,7 @@ public void testPlus() { dec1 = HiveDecimal.create("3.140"); dec2 = HiveDecimal.create("1.00"); - Assert.assertEquals("4.140", dec1.add(dec2).toString()); + Assert.assertEquals("4.14", dec1.add(dec2).toString()); } @@ -165,7 +157,7 @@ public void testPlus() { public void testSubtract() { HiveDecimal dec1 = HiveDecimal.create("3.140"); HiveDecimal dec2 = HiveDecimal.create("1.00"); - Assert.assertEquals("2.140", dec1.subtract(dec2).toString()); + Assert.assertEquals("2.14", 
dec1.subtract(dec2).toString()); } @Test diff --git a/dev-support/jenkins-common.sh b/dev-support/jenkins-common.sh index f49f099..52b518e 100644 --- a/dev-support/jenkins-common.sh +++ b/dev-support/jenkins-common.sh @@ -22,10 +22,11 @@ fail() { # Exports two variables of import: # * BUILD_PROFILE - the profile which the ptest server understands # * BUILD_OPTS - additional test options to be sent to ptest cli +# * PATCH_URL - the URL to the patch file process_jira() { test -n "$BRANCH" || fail "BRANCH must be specified" test -n "$JIRA_ROOT_URL" || fail "JIRA_ROOT_URL must be specified" - test -n "$JIRA_NAME" || fail "API_PASSWORD must be specified" + test -n "$JIRA_NAME" || fail "JIRA_NAME must be specified" JIRA_TEXT=$(mktemp) trap "rm -f $JIRA_TEXT" EXIT curl -s -S --location --retry 3 "${JIRA_ROOT_URL}/jira/browse/${JIRA_NAME}" > $JIRA_TEXT @@ -39,7 +40,7 @@ process_jira() { fail "$JIRA_NAME is not \"Patch Available\". Exiting." fi # pull attachments from JIRA (hack stolen from hadoop since rest api doesn't show attachments) - PATCH_URL=$(grep -o '"/jira/secure/attachment/[0-9]*/[^"]*' $JIRA_TEXT | \ + export PATCH_URL=$(grep -o '"/jira/secure/attachment/[0-9]*/[^"]*' $JIRA_TEXT | \ grep -v -e 'htm[l]*$' | sort | tail -1 | \ grep -o '/jira/secure/attachment/[0-9]*/[^"]*') if [[ -z "$PATCH_URL" ]] diff --git a/dev-support/jenkins-execute-build.sh b/dev-support/jenkins-execute-build.sh index 06b343a..f6ad710 100644 --- a/dev-support/jenkins-execute-build.sh +++ b/dev-support/jenkins-execute-build.sh @@ -19,15 +19,34 @@ test -n "$BRANCH" || fail "BRANCH must be specified" test -n "$API_ENDPOINT" || fail "API_ENDPOINT must be specified" test -n "$LOG_ENDPOINT" || fail "LOG_ENDPOINT must be specified" test -n "$API_PASSWORD" || fail "API_PASSWORD must be specified" -export JIRA_NAME="HIVE-${ISSUE_NUM}" +if [[ -n "$ISSUE_NUM" ]] +then + export JIRA_NAME="HIVE-${ISSUE_NUM}" +fi export ROOT=$PWD export JIRA_ROOT_URL="https://issues.apache.org" export BUILD_TAG="${BUILD_TAG##jenkins-}" -echo $JIRA_NAME +if [[ -n "$JIRA_NAME" ]] +then + echo $JIRA_NAME +fi set -x env -process_jira +if [[ -n "$JIRA_NAME" ]] +then + process_jira +fi + +profile=$BUILD_PROFILE +if [[ -z "$profile" ]] +then + profile=$DEFAULT_BUILD_PROFILE +fi +if [[ -z "$profile" ]] +then + fail "Could not find build profile" +fi test -d hive/build/ || mkdir -p hive/build/ cd hive/build/ @@ -35,17 +54,13 @@ rm -rf ptest2 svn co http://svn.apache.org/repos/asf/hive/trunk/testutils/ptest2/ ptest2 cd ptest2 -# sanity check the profile -case "$BUILD_PROFILE" in - trunk-mr1);; - trunk-mr2);; - *) - echo "Unknown profile '$BUILD_PROFILE'" - exit 1 - ;; -esac mvn clean package -DskipTests -Drat.numUnapprovedLicenses=1000 -Dmaven.repo.local=$WORKSPACE/.m2 set +e +optionalArgs=() +if [[ -n "$JIRA_NAME" ]] +then + optionalArgs=(--patch "${JIRA_ROOT_URL}${PATCH_URL}" --jira "$JIRA_NAME") +fi java -cp "target/hive-ptest-1.0-classes.jar:target/lib/*" org.apache.hive.ptest.api.client.PTestClient --endpoint "$API_ENDPOINT" \ --logsEndpoint "$LOG_ENDPOINT" \ --command testStart \ @@ -53,8 +68,7 @@ java -cp "target/hive-ptest-1.0-classes.jar:target/lib/*" org.apache.hive.ptest. --password $API_PASSWORD \ --outputDir target/ \ --testHandle "$BUILD_TAG" \ - --patch "${JIRA_ROOT_URL}${PATCH_URL}" \ - --jira "$JIRA_NAME" ${BUILD_OPTS} "$@" + ${optionalArgs[@]} ${BUILD_OPTS} "$@" ret=$? 
cd target/ if [[ -f test-results.tar.gz ]] diff --git a/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseKeyFactory3.java b/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseKeyFactory3.java index dfcbaf5..2784767 100644 --- a/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseKeyFactory3.java +++ b/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseKeyFactory3.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.hbase; import java.io.IOException; diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java index 2f624df..63909b8 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatUtil.java @@ -84,8 +84,9 @@ private static volatile HiveClientCache hiveClientCache; public static boolean checkJobContextIfRunningFromBackend(JobContext j) { - if (j.getConfiguration().get("mapred.task.id", "").equals("") && - !("true".equals(j.getConfiguration().get("pig.illustrating")))) { + if (j.getConfiguration().get("pig.job.converted.fetch", "").equals("") && + j.getConfiguration().get("mapred.task.id", "").equals("") && + !("true".equals(j.getConfiguration().get("pig.illustrating")))) { return false; } return true; diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java index 730b6ef..ffa648d 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java @@ -24,6 +24,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -44,6 +45,7 @@ import com.google.common.cache.CacheBuilder; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; +import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * A thread safe time expired cache for HiveMetaStoreClient @@ -59,7 +61,7 @@ private static final AtomicInteger nextId = new AtomicInteger(0); - private ScheduledFuture cleanupHandle; // used to cleanup cache + private final ScheduledFuture cleanupHandle; // used to cleanup cache // Since HiveMetaStoreClient is not threadsafe, hive clients are not shared across threads. 
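The two comments above describe the caching scheme: HiveMetaStoreClient is not thread safe, so each client is cached per thread, with the thread's unique id folded into the cache key. A minimal, self-contained sketch of that pattern follows; the class, field, and Factory names are illustrative only and are not the actual HiveClientCache internals, which additionally expire entries and tear clients down on removal.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch only: a cache keyed by connection settings plus a per-thread id,
// so a non-thread-safe client instance is never handed to two threads at once.
class PerThreadClientCache<C> {

  // Hands out a unique, stable id to each thread on first use.
  private static final AtomicInteger nextId = new AtomicInteger(0);
  private static final ThreadLocal<Integer> threadId = new ThreadLocal<Integer>() {
    @Override
    protected Integer initialValue() {
      return nextId.getAndIncrement();
    }
  };

  // Creates the expensive, non-thread-safe client on a cache miss.
  interface Factory<C> {
    C create();
  }

  private final ConcurrentMap<String, C> cache = new ConcurrentHashMap<String, C>();

  // The cache key combines the connection descriptor with the caller's thread id.
  C get(String connectionKey, Factory<C> factory) {
    String key = connectionKey + "#" + threadId.get();
    C client = cache.get(key);
    if (client == null) {
      C created = factory.create();
      C raced = cache.putIfAbsent(key, created);
      client = (raced != null) ? raced : created;
    }
    return client;
  }
}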
// Thread local variable containing each thread's unique ID, is used as one of the keys for the cache @@ -91,6 +93,7 @@ public HiveClientCache(final int timeout) { this.timeout = timeout; RemovalListener removalListener = new RemovalListener() { + @Override public void onRemoval(RemovalNotification notification) { CacheableHiveMetaStoreClient hiveMetaStoreClient = notification.getValue(); if (hiveMetaStoreClient != null) { @@ -108,6 +111,7 @@ public void onRemoval(RemovalNotification, c6 map, c7 map," - + " c8 struct," - + " c9 tinyint, c10 smallint, c11 float, c12 bigint," - + " c13 array>," - + " c14 map>," - + " c15 struct>," - + " c16 array,n:int>>," - + " c17 timestamp, " - + " c18 decimal(16,7)," - + " c19 binary," - + " c20 date) comment'" + dataTypeTableComment - +"' partitioned by (dt STRING)"); - assertFalse(res.next()); - - // load data - res = stmt.executeQuery("load data local inpath '" - + dataTypeDataFilePath.toString() + "' into table " + dataTypeTableName - + " PARTITION (dt='20090619')"); - assertFalse(res.next()); - - // drop view. ignore error. - try { - stmt.executeQuery("drop view " + viewName); - } catch (Exception ex) { - fail(ex.toString()); - } - - // create view - res = stmt.executeQuery("create view " + viewName + " comment '"+viewComment - +"' as select * from "+ tableName); - assertFalse(res.next()); - } - - @Override - protected void tearDown() throws Exception { - super.tearDown(); - - // drop table - Statement stmt = con.createStatement(); - assertNotNull("Statement is null", stmt); - ResultSet res = stmt.executeQuery("drop table " + tableName); - assertFalse(res.next()); - res = stmt.executeQuery("drop table " + partitionedTableName); - assertFalse(res.next()); - res = stmt.executeQuery("drop table " + dataTypeTableName); - assertFalse(res.next()); - - con.close(); - assertTrue("Connection should be closed", con.isClosed()); - - Exception expectedException = null; - try { - con.createStatement(); - } catch (Exception e) { - expectedException = e; - } - - assertNotNull( - "createStatement() on closed connection should throw exception", - expectedException); - } - - /** - * verify 'explain ...' resultset - * @throws SQLException - */ - public void testExplainStmt() throws SQLException { - Statement stmt = con.createStatement(); - - ResultSet res = stmt.executeQuery( - "explain select c1, c2, c3, c4, c5 as a, c6, c7, c8, c9, c10, c11, c12, " + - "c1*2, sentences(null, null, null) as b from " + dataTypeTableName + " limit 1"); - - ResultSetMetaData md = res.getMetaData(); - assertEquals(md.getColumnCount(), 1); // only one result column - assertEquals(md.getColumnLabel(1), EXPL_COLUMN_NAME); // verify the column name - //verify that there is data in the resultset - assertTrue("Nothing returned explain", res.next()); - } - - public void testPrepareStatement() { - - String sql = "from (select count(1) from " - + tableName - + " where 'not?param?not?param' <> 'not_param??not_param' and ?=? " - + " and 1=? and 2=? and 3.0=? and 4.0=? and 'test\\'string\"'=? and 5=? and ?=? " - + " and date '2012-01-01' = date ?" 
- + " ) t select '2011-03-25' ddate,'China',true bv, 10 num limit 10"; - - /////////////////////////////////////////////// - //////////////////// correct testcase - //////////////////// executed twice: once with the typed ps setters, once with the generic setObject - ////////////////////////////////////////////// - try { - PreparedStatement ps = createPreapredStatementUsingSetXXX(sql); - ResultSet res = ps.executeQuery(); - assertPreparedStatementResultAsExpected(res); - ps.close(); - - ps = createPreapredStatementUsingSetObject(sql); - res = ps.executeQuery(); - assertPreparedStatementResultAsExpected(res); - ps.close(); - - } catch (Exception e) { - e.printStackTrace(); - fail(e.toString()); - } - - /////////////////////////////////////////////// - //////////////////// other failure testcases - ////////////////////////////////////////////// - // set nothing for prepared sql - Exception expectedException = null; - try { - PreparedStatement ps = con.prepareStatement(sql); - ps.executeQuery(); - } catch (Exception e) { - expectedException = e; - } - assertNotNull( - "Execute the un-setted sql statement should throw exception", - expectedException); - - // set some of parameters for prepared sql, not all of them. - expectedException = null; - try { - PreparedStatement ps = con.prepareStatement(sql); - ps.setBoolean(1, true); - ps.setBoolean(2, true); - ps.executeQuery(); - } catch (Exception e) { - expectedException = e; - } - assertNotNull( - "Execute the invalid setted sql statement should throw exception", - expectedException); - - // set the wrong type parameters for prepared sql. - expectedException = null; - try { - PreparedStatement ps = con.prepareStatement(sql); - - // wrong type here - ps.setString(1, "wrong"); - - assertTrue(true); - ResultSet res = ps.executeQuery(); - if (!res.next()) { - throw new Exception("there must be a empty result set"); - } - } catch (Exception e) { - expectedException = e; - } - assertNotNull( - "Execute the invalid setted sql statement should throw exception", - expectedException); - - // setObject to the yet unknown type java.util.Date - expectedException = null; - try { - PreparedStatement ps = con.prepareStatement(sql); - ps.setObject(1, new Date()); - ps.executeQuery(); - } catch (Exception e) { - expectedException = e; - } - assertNotNull( - "Setting to an unknown type should throw an exception", - expectedException); - - } - - private PreparedStatement createPreapredStatementUsingSetObject(String sql) throws SQLException { - PreparedStatement ps = con.prepareStatement(sql); - - ps.setObject(1, true); //setBoolean - ps.setObject(2, true); //setBoolean - - ps.setObject(3, Short.valueOf("1")); //setShort - ps.setObject(4, 2); //setInt - ps.setObject(5, 3f); //setFloat - ps.setObject(6, Double.valueOf(4)); //setDouble - ps.setObject(7, "test'string\""); //setString - ps.setObject(8, 5L); //setLong - ps.setObject(9, (byte) 1); //setByte - ps.setObject(10, (byte) 1); //setByte - ps.setString(11, "2012-01-01"); //setString - - ps.setMaxRows(2); - return ps; - } - - private PreparedStatement createPreapredStatementUsingSetXXX(String sql) throws SQLException { - PreparedStatement ps = con.prepareStatement(sql); - - ps.setBoolean(1, true); //setBoolean - ps.setBoolean(2, true); //setBoolean - - ps.setShort(3, Short.valueOf("1")); //setShort - ps.setInt(4, 2); //setInt - ps.setFloat(5, 3f); //setFloat - ps.setDouble(6, Double.valueOf(4)); //setDouble - ps.setString(7, "test'string\""); //setString - ps.setLong(8, 5L); //setLong - ps.setByte(9, (byte) 1); 
//setByte - ps.setByte(10, (byte) 1); //setByte - ps.setString(11, "2012-01-01"); //setString - - ps.setMaxRows(2); - return ps; - } - - private void assertPreparedStatementResultAsExpected(ResultSet res ) throws SQLException { - assertNotNull(res); - - while (res.next()) { - assertEquals("2011-03-25", res.getString("ddate")); - assertEquals("10", res.getString("num")); - assertEquals((byte) 10, res.getByte("num")); - assertEquals("2011-03-25", res.getDate("ddate").toString()); - assertEquals(Double.valueOf(10).doubleValue(), res.getDouble("num"), 0.1); - assertEquals(10, res.getInt("num")); - assertEquals(Short.valueOf("10").shortValue(), res.getShort("num")); - assertEquals(10L, res.getLong("num")); - assertEquals(true, res.getBoolean("bv")); - Object o = res.getObject("ddate"); - assertNotNull(o); - o = res.getObject("num"); - assertNotNull(o); - } - res.close(); - assertTrue(true); - } - - public final void testSelectAll() throws Exception { - doTestSelectAll(tableName, -1, -1); // tests not setting maxRows (return all) - doTestSelectAll(tableName, 0, -1); // tests setting maxRows to 0 (return all) - } - - public final void testSelectAllPartioned() throws Exception { - doTestSelectAll(partitionedTableName, -1, -1); // tests not setting maxRows - // (return all) - doTestSelectAll(partitionedTableName, 0, -1); // tests setting maxRows to 0 - // (return all) - } - - public final void testSelectAllMaxRows() throws Exception { - doTestSelectAll(tableName, 100, -1); - } - - public final void testSelectAllFetchSize() throws Exception { - doTestSelectAll(tableName, 100, 20); - } - - public void testNullType() throws Exception { - Statement stmt = con.createStatement(); - try { - ResultSet res = stmt.executeQuery("select null from " + dataTypeTableName); - assertTrue(res.next()); - assertNull(res.getObject(1)); - } finally { - stmt.close(); - } - } - - public void testDataTypes() throws Exception { - Statement stmt = con.createStatement(); - - ResultSet res = stmt.executeQuery( - "select * from " + dataTypeTableName + " order by c1"); - ResultSetMetaData meta = res.getMetaData(); - - // row 1 - assertTrue(res.next()); - // skip the last (partitioning) column since it is always non-null - for (int i = 1; i < meta.getColumnCount(); i++) { - assertNull(res.getObject(i)); - } - - // row 2 - assertTrue(res.next()); - assertEquals(-1, res.getInt(1)); - assertEquals(false, res.getBoolean(2)); - assertEquals(-1.1d, res.getDouble(3)); - assertEquals("", res.getString(4)); - assertEquals("[]", res.getString(5)); - assertEquals("{}", res.getString(6)); - assertEquals("{}", res.getString(7)); - assertEquals("[null, null, null]", res.getString(8)); - assertEquals(-1, res.getByte(9)); - assertEquals(-1, res.getShort(10)); - assertEquals(-1.0f, res.getFloat(11)); - assertEquals(-1, res.getLong(12)); - assertEquals("[]", res.getString(13)); - assertEquals("{}", res.getString(14)); - assertEquals("[null, null]", res.getString(15)); - assertEquals("[]", res.getString(16)); - assertEquals(null, res.getString(17)); - assertEquals(null, res.getTimestamp(17)); - assertEquals(null, res.getBigDecimal(18)); - assertEquals(null, res.getString(20)); - assertEquals(null, res.getDate(20)); - - // row 3 - assertTrue(res.next()); - assertEquals(1, res.getInt(1)); - assertEquals(true, res.getBoolean(2)); - assertEquals(1.1d, res.getDouble(3)); - assertEquals("1", res.getString(4)); - assertEquals("[1, 2]", res.getString(5)); - assertEquals("{1=x, 2=y}", res.getString(6)); - assertEquals("{k=v}", res.getString(7)); - 
assertEquals("[a, 9, 2.2]", res.getString(8)); - assertEquals(1, res.getByte(9)); - assertEquals(1, res.getShort(10)); - assertEquals(1.0f, res.getFloat(11)); - assertEquals(1, res.getLong(12)); - assertEquals("[[a, b], [c, d]]", res.getString(13)); - assertEquals("{1={11=12, 13=14}, 2={21=22}}", res.getString(14)); - assertEquals("[1, [2, x]]", res.getString(15)); - assertEquals("[[{}, 1], [{c=d, a=b}, 2]]", res.getString(16)); - assertEquals("2012-04-22 09:00:00.123456789", res.getString(17)); - assertEquals("2012-04-22 09:00:00.123456789", res.getTimestamp(17).toString()); - assertEquals("123456789.0123456", res.getBigDecimal(18).toString()); - assertEquals("2013-01-01", res.getString(20)); - assertEquals("2013-01-01", res.getDate(20).toString()); - - // test getBoolean rules on non-boolean columns - assertEquals(true, res.getBoolean(1)); - assertEquals(true, res.getBoolean(4)); - - // no more rows - assertFalse(res.next()); - } - - private void doTestSelectAll(String tableName, int maxRows, int fetchSize) throws Exception { - boolean isPartitionTable = tableName.equals(partitionedTableName); - - Statement stmt = con.createStatement(); - if (maxRows >= 0) { - stmt.setMaxRows(maxRows); - } - if (fetchSize > 0) { - stmt.setFetchSize(fetchSize); - assertEquals(fetchSize, stmt.getFetchSize()); - } - - // JDBC says that 0 means return all, which is the default - int expectedMaxRows = maxRows < 1 ? 0 : maxRows; - - assertNotNull("Statement is null", stmt); - assertEquals("Statement max rows not as expected", expectedMaxRows, stmt - .getMaxRows()); - assertFalse("Statement should not be closed", stmt.isClosed()); - - ResultSet res; - - // run some queries - res = stmt.executeQuery("select * from " + tableName); - assertNotNull("ResultSet is null", res); - assertTrue("getResultSet() not returning expected ResultSet", res == stmt - .getResultSet()); - assertEquals("get update count not as expected", 0, stmt.getUpdateCount()); - int i = 0; - - ResultSetMetaData meta = res.getMetaData(); - int expectedColCount = isPartitionTable ? 3 : 2; - assertEquals( - "Unexpected column count", expectedColCount, meta.getColumnCount()); - - String colQualifier = ((tableName != null) && !tableName.isEmpty()) ? tableName.toLowerCase() + "." : ""; - boolean moreRow = res.next(); - while (moreRow) { - try { - i++; - assertEquals(res.getInt(1), res.getInt(colQualifier + "under_col")); - assertEquals(res.getString(1), res.getString(colQualifier + "under_col")); - assertEquals(res.getString(2), res.getString(colQualifier + "value")); - if (isPartitionTable) { - assertEquals(res.getString(3), partitionedColumnValue); - assertEquals(res.getString(3), res.getString(colQualifier + partitionedColumnName)); - } - assertFalse("Last result value was not null", res.wasNull()); - assertNull("No warnings should be found on ResultSet", res - .getWarnings()); - res.clearWarnings(); // verifying that method is supported - - // System.out.println(res.getString(1) + " " + res.getString(2)); - assertEquals( - "getInt and getString don't align for the same result value", - String.valueOf(res.getInt(1)), res.getString(1)); - assertEquals("Unexpected result found", "val_" + res.getString(1), res - .getString(2)); - moreRow = res.next(); - } catch (SQLException e) { - System.out.println(e.toString()); - e.printStackTrace(); - throw new Exception(e.toString()); - } - } - - // supposed to get 500 rows if maxRows isn't set - int expectedRowCount = maxRows > 0 ? 
maxRows : 500; - assertEquals("Incorrect number of rows returned", expectedRowCount, i); - - // should have no more rows - assertEquals(false, moreRow); - - assertNull("No warnings should be found on statement", stmt.getWarnings()); - stmt.clearWarnings(); // verifying that method is supported - - assertNull("No warnings should be found on connection", con.getWarnings()); - con.clearWarnings(); // verifying that method is supported - - stmt.close(); - assertTrue("Statement should be closed", stmt.isClosed()); - } - - public void testErrorMessages() throws SQLException { - String invalidSyntaxSQLState = "42000"; - - // These tests inherently cause exceptions to be written to the test output - // logs. This is undesirable, since you it might appear to someone looking - // at the test output logs as if something is failing when it isn't. Not - // sure - // how to get around that. - doTestErrorCase("SELECTT * FROM " + tableName, - "cannot recognize input near 'SELECTT' '*' 'FROM'", - invalidSyntaxSQLState, 40000); - doTestErrorCase("SELECT * FROM some_table_that_does_not_exist", - "Table not found", "42S02", 10001); - doTestErrorCase("drop table some_table_that_does_not_exist", - "Table not found", "42S02", 10001); - doTestErrorCase("SELECT invalid_column FROM " + tableName, - "Invalid table alias or column reference", invalidSyntaxSQLState, 10004); - doTestErrorCase("SELECT invalid_function(under_col) FROM " + tableName, - "Invalid function", invalidSyntaxSQLState, 10011); - - // TODO: execute errors like this currently don't return good error - // codes and messages. This should be fixed. - doTestErrorCase( - "create table " + tableName + " (key int, value string)", - "FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask", - "08S01", 1); - } - - private void doTestErrorCase(String sql, String expectedMessage, - String expectedSQLState, int expectedErrorCode) throws SQLException { - Statement stmt = con.createStatement(); - boolean exceptionFound = false; - try { - stmt.executeQuery(sql); - } catch (SQLException e) { - assertTrue("Adequate error messaging not found for '" + sql + "': " - + e.getMessage(), e.getMessage().contains(expectedMessage)); - assertEquals("Expected SQLState not found for '" + sql + "'", - expectedSQLState, e.getSQLState()); - assertEquals("Expected error code not found for '" + sql + "'", - expectedErrorCode, e.getErrorCode()); - exceptionFound = true; - } - - assertNotNull("Exception should have been thrown for query: " + sql, - exceptionFound); - } - - public void testShowTables() throws SQLException { - Statement stmt = con.createStatement(); - assertNotNull("Statement is null", stmt); - - ResultSet res = stmt.executeQuery("show tables"); - - boolean testTableExists = false; - while (res.next()) { - assertNotNull("table name is null in result set", res.getString(1)); - if (tableName.equalsIgnoreCase(res.getString(1))) { - testTableExists = true; - } - } - - assertTrue("table name " + tableName - + " not found in SHOW TABLES result set", testTableExists); - } - - public void testMetaDataGetTables() throws SQLException { - Map tests = new HashMap(); - tests.put("test%jdbc%", new Object[]{"testhivejdbcdriver_table" - , "testhivejdbcdriverpartitionedtable" - , "testhivejdbcdriverview"}); - tests.put("%jdbcdriver\\_table", new Object[]{"testhivejdbcdriver_table"}); - tests.put("testhivejdbcdriver\\_table", new Object[]{"testhivejdbcdriver_table"}); - tests.put("test_ivejdbcdri_er\\_table", new Object[]{"testhivejdbcdriver_table"}); - 
tests.put("test_ivejdbcdri_er_table", new Object[]{"testhivejdbcdriver_table"}); - tests.put("test_ivejdbcdri_er%table", new Object[]{ - "testhivejdbcdriver_table", "testhivejdbcdriverpartitionedtable" }); - tests.put("%jdbc%", new Object[]{ "testhivejdbcdriver_table" - , "testhivejdbcdriverpartitionedtable" - , "testhivejdbcdriverview"}); - tests.put("", new Object[]{}); - - for (String checkPattern: tests.keySet()) { - ResultSet rs = con.getMetaData().getTables("default", null, checkPattern, null); - int cnt = 0; - while (rs.next()) { - String resultTableName = rs.getString("TABLE_NAME"); - assertEquals("Get by index different from get by name.", rs.getString(3), resultTableName); - assertEquals("Excpected a different table.", tests.get(checkPattern)[cnt], resultTableName); - String resultTableComment = rs.getString("REMARKS"); - assertTrue("Missing comment on the table.", resultTableComment.length()>0); - String tableType = rs.getString("TABLE_TYPE"); - if (resultTableName.endsWith("view")) { - assertEquals("Expected a tabletype view but got something else.", "VIEW", tableType); - } - cnt++; - } - rs.close(); - assertEquals("Received an incorrect number of tables.", tests.get(checkPattern).length, cnt); - } - - // only ask for the views. - ResultSet rs = con.getMetaData().getTables("default", null, null - , new String[]{"VIEW"}); - int cnt=0; - while (rs.next()) { - cnt++; - } - rs.close(); - assertEquals("Incorrect number of views found.", 1, cnt); - } - - public void testMetaDataGetCatalogs() throws SQLException { - ResultSet rs = con.getMetaData().getCatalogs(); - int cnt = 0; - while (rs.next()) { - String catalogname = rs.getString("TABLE_CAT"); - assertEquals("Get by index different from get by name", rs.getString(1), catalogname); - switch(cnt) { - case 0: - assertEquals("default", catalogname); - break; - default: - fail("More then one catalog found."); - break; - } - cnt++; - } - rs.close(); - assertEquals("Incorrect catalog count", 1, cnt); - } - - public void testMetaDataGetSchemas() throws SQLException { - ResultSet rs = con.getMetaData().getSchemas(); - int cnt = 0; - while (rs.next()) { - cnt++; - } - rs.close(); - assertEquals("Incorrect schema count", 0, cnt); - } - - public void testMetaDataGetTableTypes() throws SQLException { - ResultSet rs = con.getMetaData().getTableTypes(); - Set tabletypes = new HashSet(); - tabletypes.add("TABLE"); - tabletypes.add("EXTERNAL TABLE"); - tabletypes.add("VIEW"); - - int cnt = 0; - while (rs.next()) { - String tabletype = rs.getString("TABLE_TYPE"); - assertEquals("Get by index different from get by name", rs.getString(1), tabletype); - tabletypes.remove(tabletype); - cnt++; - } - rs.close(); - assertEquals("Incorrect tabletype count.", 0, tabletypes.size()); - assertTrue("Found less tabletypes then we test for.", cnt >= tabletypes.size()); - } - - public void testMetaDataGetColumns() throws SQLException { - Map tests = new HashMap(); - tests.put(new String[]{"testhivejdbcdriver\\_table", null}, 2); - tests.put(new String[]{"testhivejdbc%", null}, 7); - tests.put(new String[]{"testhiveJDBC%", null}, 7); - tests.put(new String[]{"testhiveJDB\\C%", null}, 0); - tests.put(new String[]{"%jdbcdriver\\_table", null}, 2); - tests.put(new String[]{"%jdbcdriver\\_table%", "under\\_col"}, 1); - tests.put(new String[]{"%jdbcdriver\\_table%", "under\\_COL"}, 1); - tests.put(new String[]{"%jdbcdriver\\_table%", "under\\_co_"}, 1); - tests.put(new String[]{"%jdbcdriver\\_table%", "under_col"}, 1); - tests.put(new String[]{"%jdbcdriver\\_table%", 
"und%"}, 1); - tests.put(new String[]{"%jdbcdriver\\_table%", "%"}, 2); - tests.put(new String[]{"%jdbcdriver\\_table%", "_%"}, 2); - - for (String[] checkPattern: tests.keySet()) { - ResultSet rs = con.getMetaData().getColumns(null, null, checkPattern[0], - checkPattern[1]); - - // validate the metadata for the getColumns result set - ResultSetMetaData rsmd = rs.getMetaData(); - assertEquals("TABLE_CAT", rsmd.getColumnName(1)); - - int cnt = 0; - while (rs.next()) { - String columnname = rs.getString("COLUMN_NAME"); - int ordinalPos = rs.getInt("ORDINAL_POSITION"); - switch(cnt) { - case 0: - assertEquals("Wrong column name found", "under_col", columnname); - assertEquals("Wrong ordinal position found", ordinalPos, 1); - break; - case 1: - assertEquals("Wrong column name found", "value", columnname); - assertEquals("Wrong ordinal position found", ordinalPos, 2); - break; - default: - break; - } - cnt++; - } - rs.close(); - assertEquals("Found less columns then we test for.", tests.get(checkPattern).intValue(), cnt); - } - } - - /** - * Validate the Metadata for the result set of a metadata getColumns call. - */ - public void testMetaDataGetColumnsMetaData() throws SQLException { - ResultSet rs = con.getMetaData().getColumns(null, null - , "testhivejdbcdriver\\_table", null); - - ResultSetMetaData rsmd = rs.getMetaData(); - - assertEquals("TABLE_CAT", rsmd.getColumnName(1)); - assertEquals(Types.VARCHAR, rsmd.getColumnType(1)); - assertEquals(Integer.MAX_VALUE, rsmd.getColumnDisplaySize(1)); - - assertEquals("ORDINAL_POSITION", rsmd.getColumnName(17)); - assertEquals(Types.INTEGER, rsmd.getColumnType(17)); - assertEquals(11, rsmd.getColumnDisplaySize(17)); - } - - public void testConversionsBaseResultSet() throws SQLException { - ResultSet rs = new HiveMetaDataResultSet(Arrays.asList("key") - , Arrays.asList("long") - , Arrays.asList(1234, "1234", "abc")) { - private int cnt=1; - @Override - public boolean next() throws SQLException { - if (cnt", colRS.getString("TYPE_NAME").toLowerCase()); - assertEquals(meta.getPrecision(5), colRS.getInt("COLUMN_SIZE")); - assertEquals(meta.getScale(5), colRS.getInt("DECIMAL_DIGITS")); - - assertTrue(colRS.next()); - - assertEquals("c6", meta.getColumnName(6)); - assertEquals(Types.VARCHAR, meta.getColumnType(6)); - assertEquals("string", meta.getColumnTypeName(6)); - assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(6)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(6)); - assertEquals(0, meta.getScale(6)); - - assertEquals("c6", colRS.getString("COLUMN_NAME")); - assertEquals(Types.VARCHAR, colRS.getInt("DATA_TYPE")); - assertEquals("map", colRS.getString("TYPE_NAME").toLowerCase()); - assertEquals(meta.getPrecision(6), colRS.getInt("COLUMN_SIZE")); - assertEquals(meta.getScale(6), colRS.getInt("DECIMAL_DIGITS")); - - assertTrue(colRS.next()); - - assertEquals("c7", meta.getColumnName(7)); - assertEquals(Types.VARCHAR, meta.getColumnType(7)); - assertEquals("string", meta.getColumnTypeName(7)); - assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(7)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(7)); - assertEquals(0, meta.getScale(7)); - - assertEquals("c7", colRS.getString("COLUMN_NAME")); - assertEquals(Types.VARCHAR, colRS.getInt("DATA_TYPE")); - assertEquals("map", colRS.getString("TYPE_NAME").toLowerCase()); - assertEquals(meta.getPrecision(7), colRS.getInt("COLUMN_SIZE")); - assertEquals(meta.getScale(7), colRS.getInt("DECIMAL_DIGITS")); - - assertTrue(colRS.next()); - - assertEquals("c8", meta.getColumnName(8)); - 
assertEquals(Types.VARCHAR, meta.getColumnType(8)); - assertEquals("string", meta.getColumnTypeName(8)); - assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(8)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(8)); - assertEquals(0, meta.getScale(8)); - - assertEquals("c8", colRS.getString("COLUMN_NAME")); - assertEquals(Types.VARCHAR, colRS.getInt("DATA_TYPE")); - assertEquals("struct", colRS.getString("TYPE_NAME").toLowerCase()); - assertEquals(meta.getPrecision(8), colRS.getInt("COLUMN_SIZE")); - assertEquals(meta.getScale(8), colRS.getInt("DECIMAL_DIGITS")); - - assertTrue(colRS.next()); - - assertEquals("c9", meta.getColumnName(9)); - assertEquals(Types.TINYINT, meta.getColumnType(9)); - assertEquals("tinyint", meta.getColumnTypeName(9)); - assertEquals(4, meta.getColumnDisplaySize(9)); - assertEquals(3, meta.getPrecision(9)); - assertEquals(0, meta.getScale(9)); - - assertEquals("c9", colRS.getString("COLUMN_NAME")); - assertEquals(Types.TINYINT, colRS.getInt("DATA_TYPE")); - assertEquals("tinyint", colRS.getString("TYPE_NAME").toLowerCase()); - assertEquals(meta.getPrecision(9), colRS.getInt("COLUMN_SIZE")); - assertEquals(meta.getScale(9), colRS.getInt("DECIMAL_DIGITS")); - - assertTrue(colRS.next()); - - assertEquals("c10", meta.getColumnName(10)); - assertEquals(Types.SMALLINT, meta.getColumnType(10)); - assertEquals("smallint", meta.getColumnTypeName(10)); - assertEquals(6, meta.getColumnDisplaySize(10)); - assertEquals(5, meta.getPrecision(10)); - assertEquals(0, meta.getScale(10)); - - assertEquals("c10", colRS.getString("COLUMN_NAME")); - assertEquals(Types.SMALLINT, colRS.getInt("DATA_TYPE")); - assertEquals("smallint", colRS.getString("TYPE_NAME").toLowerCase()); - assertEquals(meta.getPrecision(10), colRS.getInt("COLUMN_SIZE")); - assertEquals(meta.getScale(10), colRS.getInt("DECIMAL_DIGITS")); - - assertTrue(colRS.next()); - - assertEquals("c11", meta.getColumnName(11)); - assertEquals(Types.FLOAT, meta.getColumnType(11)); - assertEquals("float", meta.getColumnTypeName(11)); - assertEquals(24, meta.getColumnDisplaySize(11)); - assertEquals(7, meta.getPrecision(11)); - assertEquals(7, meta.getScale(11)); - - assertEquals("c11", colRS.getString("COLUMN_NAME")); - assertEquals(Types.FLOAT, colRS.getInt("DATA_TYPE")); - assertEquals("float", colRS.getString("TYPE_NAME").toLowerCase()); - assertEquals(meta.getPrecision(11), colRS.getInt("COLUMN_SIZE")); - assertEquals(meta.getScale(11), colRS.getInt("DECIMAL_DIGITS")); - - assertTrue(colRS.next()); - - assertEquals("c12", meta.getColumnName(12)); - assertEquals(Types.BIGINT, meta.getColumnType(12)); - assertEquals("bigint", meta.getColumnTypeName(12)); - assertEquals(20, meta.getColumnDisplaySize(12)); - assertEquals(19, meta.getPrecision(12)); - assertEquals(0, meta.getScale(12)); - - assertEquals("c12", colRS.getString("COLUMN_NAME")); - assertEquals(Types.BIGINT, colRS.getInt("DATA_TYPE")); - assertEquals("bigint", colRS.getString("TYPE_NAME").toLowerCase()); - assertEquals(meta.getPrecision(12), colRS.getInt("COLUMN_SIZE")); - assertEquals(meta.getScale(12), colRS.getInt("DECIMAL_DIGITS")); - - assertEquals("_c12", meta.getColumnName(13)); - assertEquals(Types.INTEGER, meta.getColumnType(13)); - assertEquals("int", meta.getColumnTypeName(13)); - assertEquals(11, meta.getColumnDisplaySize(13)); - assertEquals(10, meta.getPrecision(13)); - assertEquals(0, meta.getScale(13)); - - assertEquals("b", meta.getColumnName(14)); - assertEquals(Types.VARCHAR, meta.getColumnType(14)); - assertEquals("string", 
meta.getColumnTypeName(14)); - assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(14)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(14)); - assertEquals(0, meta.getScale(14)); - - assertEquals("c17", meta.getColumnName(15)); - assertEquals(Types.TIMESTAMP, meta.getColumnType(15)); - assertEquals("timestamp", meta.getColumnTypeName(15)); - assertEquals(29, meta.getColumnDisplaySize(15)); - assertEquals(29, meta.getPrecision(15)); - assertEquals(9, meta.getScale(15)); - - assertEquals("c18", meta.getColumnName(16)); - assertEquals(Types.DECIMAL, meta.getColumnType(16)); - assertEquals("decimal", meta.getColumnTypeName(16)); - assertEquals(Integer.MAX_VALUE, meta.getColumnDisplaySize(16)); - assertEquals(Integer.MAX_VALUE, meta.getPrecision(16)); - assertEquals(Integer.MAX_VALUE, meta.getScale(16)); - - assertEquals("c20", meta.getColumnName(17)); - assertEquals(Types.DATE, meta.getColumnType(17)); - assertEquals("date", meta.getColumnTypeName(17)); - assertEquals(10, meta.getColumnDisplaySize(17)); - assertEquals(10, meta.getPrecision(17)); - assertEquals(0, meta.getScale(17)); - - for (int i = 1; i <= meta.getColumnCount(); i++) { - assertFalse(meta.isAutoIncrement(i)); - assertFalse(meta.isCurrency(i)); - assertEquals(ResultSetMetaData.columnNullable, meta.isNullable(i)); - } - } - - // [url] [host] [port] [db] - private static final String[][] URL_PROPERTIES = new String[][] { - {"jdbc:hive://", "", "", "default"}, - {"jdbc:hive://localhost:10001/default", "localhost", "10001", "default"}, - {"jdbc:hive://localhost/notdefault", "localhost", "10000", "notdefault"}, - {"jdbc:hive://foo:1243", "foo", "1243", "default"}}; - - public void testDriverProperties() throws SQLException { - HiveDriver driver = new HiveDriver(); - - for (String[] testValues : URL_PROPERTIES) { - DriverPropertyInfo[] dpi = driver.getPropertyInfo(testValues[0], null); - assertEquals("unexpected DriverPropertyInfo array size", 3, dpi.length); - assertDpi(dpi[0], "HOST", testValues[1]); - assertDpi(dpi[1], "PORT", testValues[2]); - assertDpi(dpi[2], "DBNAME", testValues[3]); - } - - } - - public void testInvalidUrl() throws SQLException { - HiveDriver driver = new HiveDriver(); - - assertNull(driver.connect("jdbc:hive2://localhost:1000", null)); - } - - private static void assertDpi(DriverPropertyInfo dpi, String name, - String value) { - assertEquals("Invalid DriverPropertyInfo name", name, dpi.name); - assertEquals("Invalid DriverPropertyInfo value", value, dpi.value); - assertEquals("Invalid DriverPropertyInfo required", false, dpi.required); - } - - - /** - * validate schema generated by "set" command - * @throws SQLException - */ - public void testSetCommand() throws SQLException { - // execute set command - String sql = "set -v"; - Statement stmt = con.createStatement(); - ResultSet res = stmt.executeQuery(sql); - - // Validate resultset columns - ResultSetMetaData md = res.getMetaData() ; - assertEquals(1, md.getColumnCount()); - assertEquals(SET_COLUMN_NAME, md.getColumnLabel(1)); - - //check if there is data in the resultset - assertTrue("Nothing returned by set -v", res.next()); - - res.close(); - stmt.close(); - } - - public void testShowGrant() throws SQLException { - Statement stmt = con.createStatement(); - stmt.execute("grant select on table " + dataTypeTableName + " to user hive_test_user"); - stmt.execute("show grant user hive_test_user on table " + dataTypeTableName); - - ResultSet res = stmt.getResultSet(); - assertTrue(res.next()); - assertEquals("default", res.getString(1)); - 
assertEquals(dataTypeTableName, res.getString(2)); - assertEquals("", res.getString(3)); // partition - assertEquals("", res.getString(4)); // column - assertEquals("hive_test_user", res.getString(5)); - assertEquals("USER", res.getString(6)); - assertEquals("SELECT", res.getString(7)); - assertEquals(false, res.getBoolean(8)); // grant option - assertEquals(-1, res.getLong(9)); - assertNotNull(res.getString(10)); // grantor - assertFalse(res.next()); - res.close(); - } - - public void testShowRoleGrant() throws SQLException { - Statement stmt = con.createStatement(); - - // drop role. ignore error. - try { - stmt.execute("drop role role1"); - } catch (Exception ex) { - LOG.warn("Ignoring error during drop role: " + ex); - } - - stmt.execute("create role role1"); - stmt.execute("grant role role1 to user hive_test_user"); - stmt.execute("show role grant user hive_test_user"); - - ResultSet res = stmt.getResultSet(); - assertTrue(res.next()); - assertEquals("public", res.getString(1)); - assertTrue(res.next()); - assertEquals("role1", res.getString(1)); - res.close(); - } -} diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java new file mode 100644 index 0000000..cceac93 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java @@ -0,0 +1,274 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.metastore; + +import static org.junit.Assert.*; + +import java.util.ArrayList; +import java.util.List; +
+import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hive.cli.CliSessionState; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionSpec; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.Lists; +
+public class TestFilterHooks { + + public static class DummyMetaStoreFilterHookImpl extends DefaultMetaStoreFilterHookImpl { + public static boolean blockResults = false; + + public DummyMetaStoreFilterHookImpl(HiveConf conf) { + super(conf); + } +
+ @Override + public List<String> filterDatabases(List<String> dbList) { + if (blockResults) { + return new ArrayList<String>(); + } + return super.filterDatabases(dbList); + } +
+ @Override + public Database filterDatabase(Database dataBase) throws NoSuchObjectException { + if (blockResults) { + throw new NoSuchObjectException("Blocked access"); + } + return super.filterDatabase(dataBase); + } +
+ @Override + public List<String> filterTableNames(String dbName, List<String> tableList) { + if (blockResults) { + return new ArrayList<String>(); + } + return super.filterTableNames(dbName, tableList); + } +
+ @Override + public Table filterTable(Table table) throws NoSuchObjectException { + if (blockResults) { + throw new NoSuchObjectException("Blocked access"); + } + return super.filterTable(table); + } +
+ @Override + public List<Table> filterTables(List<Table> tableList) { + if (blockResults) { + return new ArrayList<Table>(); + } + return super.filterTables(tableList); + } +
+ @Override + public List<Partition> filterPartitions(List<Partition> partitionList) { + if (blockResults) { + return new ArrayList<Partition>(); + } + return super.filterPartitions(partitionList); + } +
+ @Override + public List<PartitionSpec> filterPartitionSpecs( + List<PartitionSpec> partitionSpecList) { + if (blockResults) { + return new ArrayList<PartitionSpec>(); + } + return super.filterPartitionSpecs(partitionSpecList); + } +
+ @Override + public Partition filterPartition(Partition partition) throws NoSuchObjectException { + if (blockResults) { + throw new NoSuchObjectException("Blocked access"); + } + return super.filterPartition(partition); + } +
+ @Override + public List<String> filterPartitionNames(String dbName, String tblName, + List<String> partitionNames) { + if (blockResults) { + return new ArrayList<String>(); + } + return super.filterPartitionNames(dbName, tblName, partitionNames); + } +
+ @Override + public Index filterIndex(Index index) throws NoSuchObjectException { + if (blockResults) { + throw new NoSuchObjectException("Blocked access"); + } + return super.filterIndex(index); + } +
+ @Override + public List<String> filterIndexNames(String dbName, String tblName, + List<String> indexList) { + if (blockResults) { + return new ArrayList<String>(); + } + return super.filterIndexNames(dbName, tblName, indexList); + } +
+ @Override + public List<Index> filterIndexes(List<Index> indexeList) { + if (blockResults) { + return new ArrayList<Index>(); + } + return super.filterIndexes(indexeList); + } + } +
+ private static final String DBNAME1 = "testdb1"; + private static final String DBNAME2 = "testdb2"; + private static final String TAB1 = "tab1"; + private static final String TAB2 = "tab2"; + private static final String INDEX1 = "idx1"; + private HiveConf hiveConf; + private HiveMetaStoreClient msc; + private Driver driver; +
+ @Before + public void setUp() throws Exception { + DummyMetaStoreFilterHookImpl.blockResults = false; + int port = MetaStoreUtils.findFreePort(); + MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge()); + + hiveConf = new HiveConf(this.getClass()); + hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); + hiveConf.setVar(ConfVars.METASTORE_FILTER_HOOK, DummyMetaStoreFilterHookImpl.class.getName()); + SessionState.start(new CliSessionState(hiveConf)); + msc = new HiveMetaStoreClient(hiveConf, null); + driver = new Driver(hiveConf); + + driver.run("drop database if exists " + DBNAME1 + " cascade"); + driver.run("drop database if exists " + DBNAME2 + " cascade"); + driver.run("create database " + DBNAME1); + driver.run("create database " + DBNAME2); + driver.run("use " + DBNAME1); + driver.run("create table " + DBNAME1 + "."
+ TAB1 + " (id int, name string)"); + driver.run("create table " + TAB2 + " (id int) partitioned by (name string)"); + driver.run("ALTER TABLE " + TAB2 + " ADD PARTITION (name='value1')"); + driver.run("ALTER TABLE " + TAB2 + " ADD PARTITION (name='value2')"); + driver.run("CREATE INDEX " + INDEX1 + " on table " + TAB1 + "(id) AS 'COMPACT' WITH DEFERRED REBUILD"); + } + + @After + public void tearDown() throws Exception { + DummyMetaStoreFilterHookImpl.blockResults = false; + driver.run("drop database if exists " + DBNAME1 + " cascade"); + driver.run("drop database if exists " + DBNAME2 + " cascade"); + driver.close(); + driver.destroy(); + msc.close(); + } + + @Test + public void testDefaultFilter() throws Exception { + assertNotNull(msc.getTable(DBNAME1, TAB1)); + assertEquals(3, msc.getTables(DBNAME1, "*").size()); + assertEquals(3, msc.getAllTables(DBNAME1).size()); + assertEquals(1, msc.getTables(DBNAME1, TAB2).size()); + assertEquals(0, msc.getAllTables(DBNAME2).size()); + + assertNotNull(msc.getDatabase(DBNAME1)); + assertEquals(3, msc.getDatabases("*").size()); + assertEquals(3, msc.getAllDatabases().size()); + assertEquals(1, msc.getDatabases(DBNAME1).size()); + + assertNotNull(msc.getPartition(DBNAME1, TAB2, "name=value1")); + assertEquals(1, msc.getPartitionsByNames(DBNAME1, TAB2, Lists.newArrayList("name=value1")).size()); + + assertNotNull(msc.getIndex(DBNAME1, TAB1, INDEX1)); + } + + @Test + public void testDummyFilterForTables() throws Exception { + DummyMetaStoreFilterHookImpl.blockResults = true; + try { + msc.getTable(DBNAME1, TAB1); + fail("getTable() should fail with blocking mode"); + } catch (NoSuchObjectException e) { + // Excepted + } + assertEquals(0, msc.getTables(DBNAME1, "*").size()); + assertEquals(0, msc.getAllTables(DBNAME1).size()); + assertEquals(0, msc.getTables(DBNAME1, TAB2).size()); + } + + @Test + public void testDummyFilterForDb() throws Exception { + DummyMetaStoreFilterHookImpl.blockResults = true; + try { + assertNotNull(msc.getDatabase(DBNAME1)); + fail("getDatabase() should fail with blocking mode"); + } catch (NoSuchObjectException e) { + // Excepted + } + assertEquals(0, msc.getDatabases("*").size()); + assertEquals(0, msc.getAllDatabases().size()); + assertEquals(0, msc.getDatabases(DBNAME1).size()); + } + + @Test + public void testDummyFilterForPartition() throws Exception { + DummyMetaStoreFilterHookImpl.blockResults = true; + try { + assertNotNull(msc.getPartition(DBNAME1, TAB2, "name=value1")); + fail("getPartition() should fail with blocking mode"); + } catch (NoSuchObjectException e) { + // Excepted + } + assertEquals(0, msc.getPartitionsByNames(DBNAME1, TAB2, + Lists.newArrayList("name=value1")).size()); + } + + @Test + public void testDummyFilterForIndex() throws Exception { + DummyMetaStoreFilterHookImpl.blockResults = true; + try { + assertNotNull(msc.getIndex(DBNAME1, TAB1, INDEX1)); + fail("getPartition() should fail with blocking mode"); + } catch (NoSuchObjectException e) { + // Excepted + } + } + +} diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index 303e306..c128010 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -43,6 +43,7 @@ import java.util.Arrays; import java.util.List; import java.util.Map; +import 
java.util.concurrent.atomic.AtomicBoolean; /** */ @@ -229,9 +230,9 @@ public void testStatsAfterCompactionPartTbl() throws Exception { Worker t = new Worker(); t.setThreadId((int) t.getId()); t.setHiveConf(conf); - MetaStoreThread.BooleanPointer stop = new MetaStoreThread.BooleanPointer(); - MetaStoreThread.BooleanPointer looped = new MetaStoreThread.BooleanPointer(); - stop.boolVal = true; + AtomicBoolean stop = new AtomicBoolean(); + AtomicBoolean looped = new AtomicBoolean(); + stop.set(true); t.init(stop, looped); t.run(); ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/service/TestHiveServer.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/service/TestHiveServer.java index 6188b19..e69de29 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/service/TestHiveServer.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/service/TestHiveServer.java @@ -1,424 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.service; - -import java.util.List; -import java.util.Properties; - -import junit.framework.TestCase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ServerUtils; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Schema; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe; -import org.apache.hadoop.io.BytesWritable; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; - -/** - * TestHiveServer. 
- * - */ -public class TestHiveServer extends TestCase { - - private HiveInterface client; - private static final String host = "localhost"; - private static final int port = 10000; - private final Path dataFilePath; - - private static String tableName = "testhivedrivertable"; - private final HiveConf conf; - private boolean standAloneServer = false; - private TTransport transport; - private final String invalidPath; - - public TestHiveServer(String name) { - super(name); - conf = new HiveConf(TestHiveServer.class); - String dataFileDir = conf.get("test.data.files").replace('\\', '/') - .replace("c:", ""); - invalidPath = dataFileDir+"/invalidpath/"; - dataFilePath = new Path(dataFileDir, "kv1.txt"); - // See data/conf/hive-site.xml - String paramStr = System.getProperty("test.service.standalone.server"); - if (paramStr != null && paramStr.equals("true")) { - standAloneServer = true; - } - } - - @Override - protected void setUp() throws Exception { - super.setUp(); - - if (standAloneServer) { - try { - transport = new TSocket(host, port); - TProtocol protocol = new TBinaryProtocol(transport); - client = new HiveClient(protocol); - transport.open(); - } catch (Throwable e) { - e.printStackTrace(); - } - } else { - client = new HiveServer.HiveServerHandler(); - } - } - - @Override - protected void tearDown() throws Exception { - super.tearDown(); - if (standAloneServer) { - try { - client.clean(); - } catch (Exception e) { - e.printStackTrace(); - } - transport.close(); - } - } - - public void testExecute() throws Exception { - try { - client.execute("set hive.support.concurrency = false"); - client.execute("drop table " + tableName); - } catch (Exception ex) { - } - - try { - client.execute("create table " + tableName + " (num int)"); - client.execute("load data local inpath '" + dataFilePath.toString() - + "' into table " + tableName); - client.execute("select count(1) as cnt from " + tableName); - String row = client.fetchOne(); - assertEquals(row, "500"); - - Schema hiveSchema = client.getSchema(); - List listFields = hiveSchema.getFieldSchemas(); - assertEquals(listFields.size(), 1); - assertEquals(listFields.get(0).getName(), "cnt"); - assertEquals(listFields.get(0).getType(), "bigint"); - - Schema thriftSchema = client.getThriftSchema(); - List listThriftFields = thriftSchema.getFieldSchemas(); - assertEquals(listThriftFields.size(), 1); - assertEquals(listThriftFields.get(0).getName(), "cnt"); - assertEquals(listThriftFields.get(0).getType(), "i64"); - - client.execute("drop table " + tableName); - } catch (Throwable t) { - t.printStackTrace(); - } - } - - public void notestExecute() throws Exception { - try { - client.execute("set hive.support.concurrency = false"); - client.execute("drop table " + tableName); - } catch (Exception ex) { - } - - client.execute("create table " + tableName + " (num int)"); - client.execute("load data local inpath '" + dataFilePath.toString() - + "' into table " + tableName); - client.execute("select count(1) from " + tableName); - String row = client.fetchOne(); - assertEquals(row, "500"); - client.execute("drop table " + tableName); - transport.close(); - } - - public void testNonHiveCommand() throws Exception { - try { - client.execute("set hive.support.concurrency = false"); - client.execute("drop table " + tableName); - } catch (Exception ex) { - } - - client.execute("create table " + tableName + " (num int)"); - client.execute("load data local inpath '" + dataFilePath.toString() - + "' into table " + tableName); - - // Command not part of HiveQL - 
verify no results - client.execute("SET hive.mapred.mode = nonstrict"); - - Schema schema = client.getSchema(); - assertEquals(schema.getFieldSchemasSize(), 0); - assertEquals(schema.getPropertiesSize(), 0); - - Schema thriftschema = client.getThriftSchema(); - assertEquals(thriftschema.getFieldSchemasSize(), 0); - assertEquals(thriftschema.getPropertiesSize(), 0); - - try { - String ret = client.fetchOne(); - assertTrue(false); - } catch (HiveServerException e) { - assertEquals(e.getErrorCode(), 0); - } - assertEquals(client.fetchN(10).size(), 0); - assertEquals(client.fetchAll().size(), 0); - - // Execute Hive query and fetch - client.execute("select * from " + tableName + " limit 10"); - client.fetchOne(); - - // Re-execute command not part of HiveQL - verify still no results - client.execute("SET hive.mapred.mode = nonstrict"); - - schema = client.getSchema(); - assertEquals(schema.getFieldSchemasSize(), 0); - assertEquals(schema.getPropertiesSize(), 0); - - thriftschema = client.getThriftSchema(); - assertEquals(thriftschema.getFieldSchemasSize(), 0); - assertEquals(thriftschema.getPropertiesSize(), 0); - - try { - String ret = client.fetchOne(); - assertTrue(false); - } catch (HiveServerException e) { - assertEquals(e.getErrorCode(), 0); - } - assertEquals(client.fetchN(10).size(), 0); - assertEquals(client.fetchAll().size(), 0); - - // Cleanup - client.execute("drop table " + tableName); - } - - /** - * Test metastore call. - */ - public void testMetastore() throws Exception { - try { - client.execute("set hive.support.concurrency = false"); - client.execute("drop table " + tableName); - } catch (Exception ex) { - } - - client.execute("create table " + tableName + " (num int)"); - List tabs = client.get_tables("default", tableName); - assertEquals(tabs.get(0), tableName); - client.execute("drop table " + tableName); - } - - /** - * Test cluster status retrieval. - */ - public void testGetClusterStatus() throws Exception { - HiveClusterStatus clusterStatus = client.getClusterStatus(); - assertNotNull(clusterStatus); - assertTrue(clusterStatus.getTaskTrackers() >= 0); - assertTrue(clusterStatus.getMapTasks() >= 0); - assertTrue(clusterStatus.getReduceTasks() >= 0); - assertTrue(clusterStatus.getMaxMapTasks() >= 0); - assertTrue(clusterStatus.getMaxReduceTasks() >= 0); - assertTrue(clusterStatus.getState() == JobTrackerState.INITIALIZING - || clusterStatus.getState() == JobTrackerState.RUNNING); - } - - /** - * - */ - public void testFetch() throws Exception { - // create and populate a table with 500 rows. 
-    try {
-      client.execute("set hive.support.concurrency = false");
-      client.execute("drop table " + tableName);
-    } catch (Exception ex) {
-    }
-    client.execute("create table " + tableName + " (key int, value string)");
-    client.execute("load data local inpath '" + dataFilePath.toString()
-        + "' into table " + tableName);
-
-    try {
-      // fetchAll test
-      client.execute("select key, value from " + tableName);
-      assertEquals(client.fetchAll().size(), 500);
-      assertEquals(client.fetchAll().size(), 0);
-
-      // fetchOne test
-      client.execute("select key, value from " + tableName);
-      for (int i = 0; i < 500; i++) {
-        try {
-          String str = client.fetchOne();
-        } catch (HiveServerException e) {
-          assertTrue(false);
-        }
-      }
-      try {
-        client.fetchOne();
-      } catch (HiveServerException e) {
-        assertEquals(e.getErrorCode(), 0);
-      }
-
-      // fetchN test
-      client.execute("select key, value from " + tableName);
-      assertEquals(client.fetchN(499).size(), 499);
-      assertEquals(client.fetchN(499).size(), 1);
-      assertEquals(client.fetchN(499).size(), 0);
-    } catch (Throwable e) {
-      e.printStackTrace();
-    }
-  }
-
-  public void testDynamicSerde() throws Exception {
-    try {
-      client.execute("set hive.support.concurrency = false");
-      client.execute("drop table " + tableName);
-    } catch (Exception ex) {
-    }
-
-    client.execute("create table " + tableName + " (key int, value string)");
-    client.execute("load data local inpath '" + dataFilePath.toString()
-        + "' into table " + tableName);
-    // client.execute("select key, count(1) from " + tableName +
-    // " where key > 10 group by key");
-    String sql = "select key, value from " + tableName + " where key > 10";
-    client.execute(sql);
-
-    // Instantiate DynamicSerDe
-    DynamicSerDe ds = new DynamicSerDe();
-    Properties dsp = new Properties();
-    dsp.setProperty(serdeConstants.SERIALIZATION_FORMAT,
-        org.apache.hadoop.hive.serde2.thrift.TCTLSeparatedProtocol.class
-            .getName());
-    dsp.setProperty(
-        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_NAME,
-        "result");
-    String serDDL = new String("struct result { ");
-    List schema = client.getThriftSchema().getFieldSchemas();
-    for (int pos = 0; pos < schema.size(); pos++) {
-      if (pos != 0) {
-        serDDL = serDDL.concat(",");
-      }
-      serDDL = serDDL.concat(schema.get(pos).getType());
-      serDDL = serDDL.concat(" ");
-      serDDL = serDDL.concat(schema.get(pos).getName());
-    }
-    serDDL = serDDL.concat("}");
-
-    dsp.setProperty(serdeConstants.SERIALIZATION_DDL, serDDL);
-    dsp.setProperty(serdeConstants.SERIALIZATION_LIB, ds.getClass().toString());
-    dsp.setProperty(serdeConstants.FIELD_DELIM, "9");
-    ds.initialize(new Configuration(), dsp);
-
-    String row = client.fetchOne();
-    Object o = ds.deserialize(new BytesWritable(row.getBytes()));
-
-    assertEquals(o.getClass().toString(), "class java.util.ArrayList");
-    List lst = (List) o;
-    assertEquals(lst.get(0), 238);
-
-    // TODO: serde doesn't like underscore -- struct result { string _c0}
-    sql = "select count(1) as c from " + tableName;
-    client.execute(sql);
-    row = client.fetchOne();
-
-    serDDL = new String("struct result { ");
-    schema = client.getThriftSchema().getFieldSchemas();
-    for (int pos = 0; pos < schema.size(); pos++) {
-      if (pos != 0) {
-        serDDL = serDDL.concat(",");
-      }
-      serDDL = serDDL.concat(schema.get(pos).getType());
-      serDDL = serDDL.concat(" ");
-      serDDL = serDDL.concat(schema.get(pos).getName());
-    }
-    serDDL = serDDL.concat("}");
-
-    dsp.setProperty(serdeConstants.SERIALIZATION_DDL, serDDL);
-    // Need a new DynamicSerDe instance - re-initialization is not supported.
-    ds = new DynamicSerDe();
-    ds.initialize(new Configuration(), dsp);
-    o = ds.deserialize(new BytesWritable(row.getBytes()));
-  }
-
-  public void testAddJarShouldFailIfJarNotExist() throws Exception {
-    boolean queryExecutionFailed = false;
-    try {
-      client.execute("add jar " + invalidPath + "sample.jar");
-    } catch (Exception e) {
-      queryExecutionFailed = true;
-    }
-    if (!queryExecutionFailed) {
-      fail("It should throw exception since jar does not exist");
-    }
-  }
-
-  public void testAddFileShouldFailIfFileNotExist() throws Exception {
-    boolean queryExecutionFailed = false;
-    try {
-      client.execute("add file " + invalidPath + "sample.txt");
-    } catch (Exception e) {
-      queryExecutionFailed = true;
-    }
-    if (!queryExecutionFailed) {
-      fail("It should throw exception since file does not exist");
-    }
-  }
-
-  public void testAddArchiveShouldFailIfFileNotExist() throws Exception {
-    boolean queryExecutionFailed = false;
-    try {
-      client.execute("add archive " + invalidPath + "sample.zip");
-    } catch (Exception e) {
-      queryExecutionFailed = true;
-    }
-    if (!queryExecutionFailed) {
-      fail("It should trow exception since archive does not exist");
-    }
-  }
-
-  public void testScratchDirShouldNotClearWhileStartup() throws Exception {
-    FileSystem fs = FileSystem.get(conf);
-    Path scratchDirPath = new Path(HiveConf.getVar(conf,
-        HiveConf.ConfVars.SCRATCHDIR));
-    boolean fileExists = fs.exists(scratchDirPath);
-    if (!fileExists) {
-      fileExists = fs.mkdirs(scratchDirPath);
-    }
-    ServerUtils.cleanUpScratchDir(conf);
-    assertTrue("Scratch dir is not available after startup", fs.exists(scratchDirPath));
-  }
-
-  public void testScratchDirShouldClearWhileStartup() throws Exception {
-    FileSystem fs = FileSystem.get(conf);
-    Path scratchDirPath = new Path(HiveConf.getVar(conf,
-        HiveConf.ConfVars.SCRATCHDIR));
-    boolean fileExists = fs.exists(scratchDirPath);
-    if (!fileExists) {
-      fileExists = fs.mkdirs(scratchDirPath);
-    }
-    try {
-      conf.setBoolVar(HiveConf.ConfVars.HIVE_START_CLEANUP_SCRATCHDIR, true);
-      ServerUtils.cleanUpScratchDir(conf);
-    } finally {
-      conf.setBoolVar(HiveConf.ConfVars.HIVE_START_CLEANUP_SCRATCHDIR, false);
-    }
-    assertFalse("Scratch dir is available after startup", fs.exists(scratchDirPath));
-  }
-
-}
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
index 8126cdf..4fa8aef 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java
@@ -529,7 +529,7 @@ public void testEmbeddedBeelineConnection() throws Throwable{
   public void testQueryProgress() throws Throwable {
     final String SCRIPT_TEXT = "set hive.support.concurrency = false;\n" +
         "select count(*) from " + tableName + ";\n";
-    final String EXPECTED_PATTERN = "Parsing command";
+    final String EXPECTED_PATTERN = "number of splits";
     testScriptFile(SCRIPT_TEXT, EXPECTED_PATTERN, true,
         getBaseArgs(miniHS2.getBaseJdbcURL()));
   }
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
index 47a462a..f2560e2 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
@@ -51,6 +51,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import
org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.ql.exec.UDF; import org.apache.hadoop.hive.ql.processors.DfsProcessor; @@ -105,6 +106,7 @@ public TestJdbcDriver2() { public static void setUpBeforeClass() throws SQLException, ClassNotFoundException{ Class.forName(driverName); Connection con1 = getConnection("default"); + System.setProperty(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_VERBOSE.varname, "" + true); Statement stmt1 = con1.createStatement(); assertNotNull("Statement is null", stmt1); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java index ebda296..e7be18a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java @@ -18,6 +18,7 @@ package org.apache.hive.service.cli; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hive.service.cli.thrift.EmbeddedThriftBinaryCLIService; import org.apache.hive.service.cli.thrift.ThriftCLIService; import org.apache.hive.service.cli.thrift.ThriftCLIServiceClient; @@ -35,6 +36,7 @@ @BeforeClass public static void setUpBeforeClass() throws Exception { service = new EmbeddedThriftBinaryCLIService(); + service.init(new HiveConf()); client = new ThriftCLIServiceClient(service); } diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml index 376f4a9..a6a7547 100644 --- a/itests/qtest/pom.xml +++ b/itests/qtest/pom.xml @@ -414,22 +414,6 @@ - - - - row; - protected List columnNames; - protected List columnTypes; - - public boolean absolute(int row) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void afterLast() throws SQLException { - throw new SQLException("Method not supported"); - } - - public void beforeFirst() throws SQLException { - throw new SQLException("Method not supported"); - } - - public void cancelRowUpdates() throws SQLException { - throw new SQLException("Method not supported"); - } - - public void deleteRow() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int findColumn(String columnName) throws SQLException { - int columnIndex = columnNames.indexOf(columnName); - if (columnIndex==-1) { - throw new SQLException(); - } else { - return ++columnIndex; - } - } - - public boolean first() throws SQLException { - throw new SQLException("Method not supported"); - } - - public Array getArray(int i) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Array getArray(String colName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public InputStream getAsciiStream(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public InputStream getAsciiStream(String columnName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public BigDecimal getBigDecimal(int columnIndex) throws SQLException { - Object obj = getObject(columnIndex); - if (obj == null) { - return null; - } - if (obj instanceof BigDecimal) { - return ((BigDecimal) obj); - } - if (obj instanceof HiveDecimal) { - return ((HiveDecimal) obj).bigDecimalValue(); - } - throw new SQLException("Cannot convert column " + columnIndex - + " 
to BigDecimal. Found data of type: " - + obj.getClass()+", value: " + obj.toString()); - } - - public BigDecimal getBigDecimal(String columnName) throws SQLException { - return getBigDecimal(findColumn(columnName)); - } - - public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - MathContext mc = new MathContext(scale); - return getBigDecimal(columnIndex).round(mc); - } - - public BigDecimal getBigDecimal(String columnName, int scale) throws SQLException { - return getBigDecimal(findColumn(columnName), scale); - } - - public InputStream getBinaryStream(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public InputStream getBinaryStream(String columnName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Blob getBlob(int i) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Blob getBlob(String colName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean getBoolean(int columnIndex) throws SQLException { - Object obj = getObject(columnIndex); - if (Boolean.class.isInstance(obj)) { - return (Boolean) obj; - } else if (obj == null) { - return false; - } else if (Number.class.isInstance(obj)) { - return ((Number) obj).intValue() != 0; - } else if (String.class.isInstance(obj)) { - return !((String) obj).equals("0"); - } - throw new SQLException("Cannot convert column " + columnIndex + " to boolean"); - } - - public boolean getBoolean(String columnName) throws SQLException { - return getBoolean(findColumn(columnName)); - } - - public byte getByte(int columnIndex) throws SQLException { - Object obj = getObject(columnIndex); - if (Number.class.isInstance(obj)) { - return ((Number) obj).byteValue(); - } else if (obj == null) { - return 0; - } - throw new SQLException("Cannot convert column " + columnIndex + " to byte"); - } - - public byte getByte(String columnName) throws SQLException { - return getByte(findColumn(columnName)); - } - - public byte[] getBytes(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public byte[] getBytes(String columnName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Reader getCharacterStream(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Reader getCharacterStream(String columnName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Clob getClob(int i) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Clob getClob(String colName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getConcurrency() throws SQLException { - return ResultSet.CONCUR_READ_ONLY; - } - - public String getCursorName() throws SQLException { - throw new SQLException("Method not supported"); - } - - public Date getDate(int columnIndex) throws SQLException { - Object obj = getObject(columnIndex); - if (obj == null) { - return null; - } - - if (obj instanceof Date) { - return (Date) obj; - } - - try { - if (obj instanceof String) { - return Date.valueOf((String)obj); - } - } catch (Exception e) { - throw new SQLException("Cannot convert column " + columnIndex - + " to date: " + e.toString()); - } - - throw new SQLException("Illegal conversion"); - } - - public Date getDate(String columnName) throws SQLException { - return getDate(findColumn(columnName)); - } - - 
public Date getDate(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Date getDate(String columnName, Calendar cal) throws SQLException { - throw new SQLException("Method not supported"); - } - - public double getDouble(int columnIndex) throws SQLException { - try { - Object obj = getObject(columnIndex); - if (Number.class.isInstance(obj)) { - return ((Number) obj).doubleValue(); - } else if (obj == null) { - return 0; - } else if (String.class.isInstance(obj)) { - return Double.valueOf((String)obj); - } - throw new Exception("Illegal conversion"); - } catch (Exception e) { - throw new SQLException("Cannot convert column " + columnIndex - + " to double: " + e.toString()); - } - } - - public double getDouble(String columnName) throws SQLException { - return getDouble(findColumn(columnName)); - } - - public int getFetchDirection() throws SQLException { - return ResultSet.FETCH_FORWARD; - } - - public int getFetchSize() throws SQLException { - throw new SQLException("Method not supported"); - } - - public float getFloat(int columnIndex) throws SQLException { - try { - Object obj = getObject(columnIndex); - if (Number.class.isInstance(obj)) { - return ((Number) obj).floatValue(); - } else if (obj == null) { - return 0; - } else if (String.class.isInstance(obj)) { - return Float.valueOf((String)obj); - } - throw new Exception("Illegal conversion"); - } catch (Exception e) { - throw new SQLException("Cannot convert column " + columnIndex - + " to float: " + e.toString()); - } - } - - public float getFloat(String columnName) throws SQLException { - return getFloat(findColumn(columnName)); - } - - public int getHoldability() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getInt(int columnIndex) throws SQLException { - try { - Object obj = getObject(columnIndex); - if (Number.class.isInstance(obj)) { - return ((Number) obj).intValue(); - } else if (obj == null) { - return 0; - } else if (String.class.isInstance(obj)) { - return Integer.valueOf((String)obj); - } - throw new Exception("Illegal conversion"); - } catch (Exception e) { - throw new SQLException("Cannot convert column " + columnIndex + " to integer" + e.toString()); - } - } - - public int getInt(String columnName) throws SQLException { - return getInt(findColumn(columnName)); - } - - public long getLong(int columnIndex) throws SQLException { - try { - Object obj = getObject(columnIndex); - if (Number.class.isInstance(obj)) { - return ((Number) obj).longValue(); - } else if (obj == null) { - return 0; - } else if (String.class.isInstance(obj)) { - return Long.valueOf((String)obj); - } - throw new Exception("Illegal conversion"); - } catch (Exception e) { - throw new SQLException("Cannot convert column " + columnIndex + " to long: " + e.toString()); - } - } - - public long getLong(String columnName) throws SQLException { - return getLong(findColumn(columnName)); - } - - public ResultSetMetaData getMetaData() throws SQLException { - return new HiveResultSetMetaData(columnNames, columnTypes); - } - - public Reader getNCharacterStream(int arg0) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Reader getNCharacterStream(String arg0) throws SQLException { - throw new SQLException("Method not supported"); - } - - public NClob getNClob(int arg0) throws SQLException { - throw new SQLException("Method not supported"); - } - - public NClob getNClob(String columnLabel) throws SQLException { - throw new 
SQLException("Method not supported"); - } - - public String getNString(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getNString(String columnLabel) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Object getObject(int columnIndex) throws SQLException { - if (row == null) { - throw new SQLException("No row found."); - } - - if (columnIndex > row.size()) { - throw new SQLException("Invalid columnIndex: " + columnIndex); - } - - try { - wasNull = false; - if (row.get(columnIndex - 1) == null) { - wasNull = true; - } - - return row.get(columnIndex - 1); - } catch (Exception e) { - throw new SQLException(e.toString()); - } - } - - public Object getObject(String columnName) throws SQLException { - return getObject(findColumn(columnName)); - } - - public T getObject(int columnIndex, Class type) throws SQLException { - // TODO method required by JDK 1.7 - throw new SQLException("Method not supported"); - } - - public T getObject(String columnLabel, Class type) throws SQLException { - // TODO method required by JDK 1.7 - throw new SQLException("Method not supported"); - } - - public Object getObject(int i, Map> map) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Object getObject(String colName, Map> map) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Ref getRef(int i) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Ref getRef(String colName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getRow() throws SQLException { - throw new SQLException("Method not supported"); - } - - public RowId getRowId(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public RowId getRowId(String columnLabel) throws SQLException { - throw new SQLException("Method not supported"); - } - - public SQLXML getSQLXML(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public SQLXML getSQLXML(String columnLabel) throws SQLException { - throw new SQLException("Method not supported"); - } - - public short getShort(int columnIndex) throws SQLException { - try { - Object obj = getObject(columnIndex); - if (Number.class.isInstance(obj)) { - return ((Number) obj).shortValue(); - } else if (obj == null) { - return 0; - } else if (String.class.isInstance(obj)) { - return Short.valueOf((String)obj); - } - throw new Exception("Illegal conversion"); - } catch (Exception e) { - throw new SQLException("Cannot convert column " + columnIndex - + " to short: " + e.toString()); - } - } - - public short getShort(String columnName) throws SQLException { - return getShort(findColumn(columnName)); - } - - public Statement getStatement() throws SQLException { - throw new SQLException("Method not supported"); - } - - /** - * @param columnIndex - the first column is 1, the second is 2, ... - * @see java.sql.ResultSet#getString(int) - */ - - public String getString(int columnIndex) throws SQLException { - // Column index starts from 1, not 0. 
- Object obj = getObject(columnIndex); - if (obj == null) { - return null; - } - - return obj.toString(); - } - - public String getString(String columnName) throws SQLException { - return getString(findColumn(columnName)); - } - - public Time getTime(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Time getTime(String columnName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Time getTime(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Time getTime(String columnName, Calendar cal) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Timestamp getTimestamp(int columnIndex) throws SQLException { - Object obj = getObject(columnIndex); - if (obj == null) { - return null; - } - if (obj instanceof Timestamp) { - return (Timestamp) obj; - } - if (obj instanceof String) { - return Timestamp.valueOf((String)obj); - } - throw new SQLException("Illegal conversion"); - } - - public Timestamp getTimestamp(String columnName) throws SQLException { - return getTimestamp(findColumn(columnName)); - } - - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - throw new SQLException("Method not supported"); - } - - public Timestamp getTimestamp(String columnName, Calendar cal) throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getType() throws SQLException { - return ResultSet.TYPE_FORWARD_ONLY; - } - - public URL getURL(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public URL getURL(String columnName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public InputStream getUnicodeStream(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public InputStream getUnicodeStream(String columnName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void insertRow() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isAfterLast() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isBeforeFirst() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isClosed() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isFirst() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isLast() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean last() throws SQLException { - throw new SQLException("Method not supported"); - } - - public void moveToCurrentRow() throws SQLException { - throw new SQLException("Method not supported"); - } - - public void moveToInsertRow() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean previous() throws SQLException { - throw new SQLException("Method not supported"); - } - - public void refreshRow() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean relative(int rows) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean rowDeleted() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean rowInserted() throws SQLException { - throw new SQLException("Method 
not supported"); - } - - public boolean rowUpdated() throws SQLException { - throw new SQLException("Method not supported"); - } - - public void setFetchDirection(int direction) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void setFetchSize(int rows) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateArray(int columnIndex, Array x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateArray(String columnName, Array x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateAsciiStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateAsciiStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateAsciiStream(int columnIndex, InputStream x, int length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateAsciiStream(String columnName, InputStream x, int length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateAsciiStream(int columnIndex, InputStream x, long length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateAsciiStream(String columnLabel, InputStream x, long length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBigDecimal(int columnIndex, BigDecimal x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBigDecimal(String columnName, BigDecimal x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBinaryStream(int columnIndex, InputStream x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBinaryStream(String columnLabel, InputStream x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBinaryStream(int columnIndex, InputStream x, int length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBinaryStream(String columnName, InputStream x, int length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBinaryStream(int columnIndex, InputStream x, long length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBinaryStream(String columnLabel, InputStream x, long length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBlob(int columnIndex, Blob x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBlob(String columnName, Blob x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBlob(int columnIndex, InputStream inputStream) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBlob(String columnLabel, InputStream inputStream) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBlob(int columnIndex, InputStream inputStream, long length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBlob(String columnLabel, InputStream inputStream, - long length) 
throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBoolean(int columnIndex, boolean x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBoolean(String columnName, boolean x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateByte(int columnIndex, byte x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateByte(String columnName, byte x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBytes(int columnIndex, byte[] x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateBytes(String columnName, byte[] x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateCharacterStream(int columnIndex, Reader x, int length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateCharacterStream(String columnName, Reader reader, int length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateCharacterStream(int columnIndex, Reader x, long length) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateCharacterStream(String columnLabel, Reader reader, - long length) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateClob(int columnIndex, Clob x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateClob(String columnName, Clob x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateDate(int columnIndex, Date x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateDate(String columnName, Date x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateDouble(int columnIndex, double x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateDouble(String columnName, double x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateFloat(int columnIndex, float x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateFloat(String columnName, float x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateInt(int columnIndex, int x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void 
updateInt(String columnName, int x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateLong(int columnIndex, long x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateLong(String columnName, long x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNCharacterStream(int columnIndex, Reader x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNCharacterStream(String columnLabel, Reader reader) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNCharacterStream(int columnIndex, Reader x, long length) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNCharacterStream(String columnLabel, Reader reader, - long length) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNClob(int columnIndex, NClob clob) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNClob(String columnLabel, NClob clob) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNClob(int columnIndex, Reader reader) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNClob(String columnLabel, Reader reader) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNClob(int columnIndex, Reader reader, long length) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNClob(String columnLabel, Reader reader, long length) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNString(int columnIndex, String string) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNString(String columnLabel, String string) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNull(int columnIndex) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateNull(String columnName) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateObject(int columnIndex, Object x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateObject(String columnName, Object x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateObject(int columnIndex, Object x, int scale) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateObject(String columnName, Object x, int scale) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateRef(int columnIndex, Ref x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateRef(String columnName, Ref x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateRow() throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateRowId(int columnIndex, RowId x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateRowId(String columnLabel, RowId x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void 
updateSQLXML(int columnIndex, SQLXML xmlObject) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateSQLXML(String columnLabel, SQLXML xmlObject) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateShort(int columnIndex, short x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateShort(String columnName, short x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateString(int columnIndex, String x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateString(String columnName, String x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateTime(int columnIndex, Time x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateTime(String columnName, Time x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateTimestamp(int columnIndex, Timestamp x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public void updateTimestamp(String columnName, Timestamp x) throws SQLException { - throw new SQLException("Method not supported"); - } - - public SQLWarning getWarnings() throws SQLException { - return warningChain; - } - - public void clearWarnings() throws SQLException { - warningChain = null; - } - - public void close() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean wasNull() throws SQLException { - return wasNull; - } - - public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLException("Method not supported"); - } - - public T unwrap(Class iface) throws SQLException { - throw new SQLException("Method not supported"); - } -} diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveCallableStatement.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveCallableStatement.java index ff961f3..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveCallableStatement.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveCallableStatement.java @@ -1,2464 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.jdbc; - -import java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.net.URL; -import java.sql.Array; -import java.sql.Blob; -import java.sql.Clob; -import java.sql.Connection; -import java.sql.Date; -import java.sql.NClob; -import java.sql.ParameterMetaData; -import java.sql.Ref; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.RowId; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Time; -import java.sql.Timestamp; -import java.util.Calendar; -import java.util.Map; - -/** - * HiveCallableStatement. - * - */ -public class HiveCallableStatement implements java.sql.CallableStatement { - - /** - * - */ - public HiveCallableStatement() { - // TODO Auto-generated constructor stub - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getArray(int) - */ - - public Array getArray(int i) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getArray(java.lang.String) - */ - - public Array getArray(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getBigDecimal(int) - */ - - public BigDecimal getBigDecimal(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getBigDecimal(java.lang.String) - */ - - public BigDecimal getBigDecimal(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getBigDecimal(int, int) - */ - - public BigDecimal getBigDecimal(int parameterIndex, int scale) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getBlob(int) - */ - - public Blob getBlob(int i) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getBlob(java.lang.String) - */ - - public Blob getBlob(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getBoolean(int) - */ - - public boolean getBoolean(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getBoolean(java.lang.String) - */ - - public boolean getBoolean(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getByte(int) - */ - - public byte getByte(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getByte(java.lang.String) - */ - - public byte getByte(String parameterName) throws SQLException { - // TODO 
Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getBytes(int) - */ - - public byte[] getBytes(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getBytes(java.lang.String) - */ - - public byte[] getBytes(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getCharacterStream(int) - */ - - public Reader getCharacterStream(int arg0) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getCharacterStream(java.lang.String) - */ - - public Reader getCharacterStream(String arg0) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getClob(int) - */ - - public Clob getClob(int i) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getClob(java.lang.String) - */ - - public Clob getClob(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getDate(int) - */ - - public Date getDate(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getDate(java.lang.String) - */ - - public Date getDate(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getDate(int, java.util.Calendar) - */ - - public Date getDate(int parameterIndex, Calendar cal) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getDate(java.lang.String, - * java.util.Calendar) - */ - - public Date getDate(String parameterName, Calendar cal) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getDouble(int) - */ - - public double getDouble(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getDouble(java.lang.String) - */ - - public double getDouble(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getFloat(int) - */ - - public float getFloat(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getFloat(java.lang.String) - */ - - public float getFloat(String parameterName) throws 
SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getInt(int) - */ - - public int getInt(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getInt(java.lang.String) - */ - - public int getInt(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getLong(int) - */ - - public long getLong(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getLong(java.lang.String) - */ - - public long getLong(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getNCharacterStream(int) - */ - - public Reader getNCharacterStream(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getNCharacterStream(java.lang.String) - */ - - public Reader getNCharacterStream(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getNClob(int) - */ - - public NClob getNClob(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getNClob(java.lang.String) - */ - - public NClob getNClob(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getNString(int) - */ - - public String getNString(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getNString(java.lang.String) - */ - - public String getNString(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getObject(int) - */ - - public Object getObject(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getObject(java.lang.String) - */ - - public Object getObject(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - public T getObject(int parameterIndex, Class type) throws SQLException { - // TODO JDK 1.7 - throw new SQLException("Method not supported"); - } - - public T getObject(String parameterName, Class type) throws SQLException { - // TODO JDK 1.7 - throw new SQLException("Method not supported"); - } - - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getObject(int, java.util.Map) - 
*/ - - public Object getObject(int i, Map> map) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getObject(java.lang.String, java.util.Map) - */ - - public Object getObject(String parameterName, Map> map) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getRef(int) - */ - - public Ref getRef(int i) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getRef(java.lang.String) - */ - - public Ref getRef(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getRowId(int) - */ - - public RowId getRowId(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getRowId(java.lang.String) - */ - - public RowId getRowId(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getSQLXML(int) - */ - - public SQLXML getSQLXML(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getSQLXML(java.lang.String) - */ - - public SQLXML getSQLXML(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getShort(int) - */ - - public short getShort(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getShort(java.lang.String) - */ - - public short getShort(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getString(int) - */ - - public String getString(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getString(java.lang.String) - */ - - public String getString(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getTime(int) - */ - - public Time getTime(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getTime(java.lang.String) - */ - - public Time getTime(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getTime(int, java.util.Calendar) - */ - - public Time 
getTime(int parameterIndex, Calendar cal) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getTime(java.lang.String, - * java.util.Calendar) - */ - - public Time getTime(String parameterName, Calendar cal) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getTimestamp(int) - */ - - public Timestamp getTimestamp(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getTimestamp(java.lang.String) - */ - - public Timestamp getTimestamp(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getTimestamp(int, java.util.Calendar) - */ - - public Timestamp getTimestamp(int parameterIndex, Calendar cal) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getTimestamp(java.lang.String, - * java.util.Calendar) - */ - - public Timestamp getTimestamp(String parameterName, Calendar cal) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getURL(int) - */ - - public URL getURL(int parameterIndex) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#getURL(java.lang.String) - */ - - public URL getURL(String parameterName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#registerOutParameter(int, int) - */ - - public void registerOutParameter(int parameterIndex, int sqlType) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#registerOutParameter(java.lang.String, int) - */ - - public void registerOutParameter(String parameterName, int sqlType) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#registerOutParameter(int, int, int) - */ - - public void registerOutParameter(int parameterIndex, int sqlType, int scale) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#registerOutParameter(int, int, - * java.lang.String) - */ - - public void registerOutParameter(int paramIndex, int sqlType, String typeName) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#registerOutParameter(java.lang.String, int, - * int) - */ - - public void registerOutParameter(String parameterName, int sqlType, int scale) - throws SQLException { - // TODO Auto-generated method stub - throw new 
SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#registerOutParameter(java.lang.String, int, - * java.lang.String) - */ - - public void registerOutParameter(String parameterName, int sqlType, - String typeName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setAsciiStream(java.lang.String, - * java.io.InputStream) - */ - - public void setAsciiStream(String parameterName, InputStream x) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setAsciiStream(java.lang.String, - * java.io.InputStream, int) - */ - - public void setAsciiStream(String parameterName, InputStream x, int length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setAsciiStream(java.lang.String, - * java.io.InputStream, long) - */ - - public void setAsciiStream(String parameterName, InputStream x, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setBigDecimal(java.lang.String, - * java.math.BigDecimal) - */ - - public void setBigDecimal(String parameterName, BigDecimal x) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setBinaryStream(java.lang.String, - * java.io.InputStream) - */ - - public void setBinaryStream(String parameterName, InputStream x) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setBinaryStream(java.lang.String, - * java.io.InputStream, int) - */ - - public void setBinaryStream(String parameterName, InputStream x, int length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setBinaryStream(java.lang.String, - * java.io.InputStream, long) - */ - - public void setBinaryStream(String parameterName, InputStream x, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setBlob(java.lang.String, java.sql.Blob) - */ - - public void setBlob(String parameterName, Blob x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setBlob(java.lang.String, - * java.io.InputStream) - */ - - public void setBlob(String parameterName, InputStream inputStream) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setBlob(java.lang.String, - * java.io.InputStream, long) - */ - - public void setBlob(String parameterName, InputStream inputStream, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - 
* (non-Javadoc) - * - * @see java.sql.CallableStatement#setBoolean(java.lang.String, boolean) - */ - - public void setBoolean(String parameterName, boolean x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setByte(java.lang.String, byte) - */ - - public void setByte(String parameterName, byte x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setBytes(java.lang.String, byte[]) - */ - - public void setBytes(String parameterName, byte[] x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setCharacterStream(java.lang.String, - * java.io.Reader) - */ - - public void setCharacterStream(String parameterName, Reader reader) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setCharacterStream(java.lang.String, - * java.io.Reader, int) - */ - - public void setCharacterStream(String parameterName, Reader reader, int length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setCharacterStream(java.lang.String, - * java.io.Reader, long) - */ - - public void setCharacterStream(String parameterName, Reader reader, - long length) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setClob(java.lang.String, java.sql.Clob) - */ - - public void setClob(String parameterName, Clob x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setClob(java.lang.String, java.io.Reader) - */ - - public void setClob(String parameterName, Reader reader) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setClob(java.lang.String, java.io.Reader, - * long) - */ - - public void setClob(String parameterName, Reader reader, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setDate(java.lang.String, java.sql.Date) - */ - - public void setDate(String parameterName, Date x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setDate(java.lang.String, java.sql.Date, - * java.util.Calendar) - */ - - public void setDate(String parameterName, Date x, Calendar cal) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setDouble(java.lang.String, double) - */ - - public void setDouble(String parameterName, double x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * 
(non-Javadoc) - * - * @see java.sql.CallableStatement#setFloat(java.lang.String, float) - */ - - public void setFloat(String parameterName, float x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setInt(java.lang.String, int) - */ - - public void setInt(String parameterName, int x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setLong(java.lang.String, long) - */ - - public void setLong(String parameterName, long x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setNCharacterStream(java.lang.String, - * java.io.Reader) - */ - - public void setNCharacterStream(String parameterName, Reader value) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setNCharacterStream(java.lang.String, - * java.io.Reader, long) - */ - - public void setNCharacterStream(String parameterName, Reader value, - long length) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setNClob(java.lang.String, java.sql.NClob) - */ - - public void setNClob(String parameterName, NClob value) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setNClob(java.lang.String, java.io.Reader) - */ - - public void setNClob(String parameterName, Reader reader) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setNClob(java.lang.String, java.io.Reader, - * long) - */ - - public void setNClob(String parameterName, Reader reader, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setNString(java.lang.String, - * java.lang.String) - */ - - public void setNString(String parameterName, String value) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setNull(java.lang.String, int) - */ - - public void setNull(String parameterName, int sqlType) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setNull(java.lang.String, int, - * java.lang.String) - */ - - public void setNull(String parameterName, int sqlType, String typeName) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setObject(java.lang.String, - * java.lang.Object) - */ - - public void setObject(String parameterName, Object x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see 
java.sql.CallableStatement#setObject(java.lang.String, - * java.lang.Object, int) - */ - - public void setObject(String parameterName, Object x, int targetSqlType) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setObject(java.lang.String, - * java.lang.Object, int, int) - */ - - public void setObject(String parameterName, Object x, int targetSqlType, - int scale) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setRowId(java.lang.String, java.sql.RowId) - */ - - public void setRowId(String parameterName, RowId x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setSQLXML(java.lang.String, - * java.sql.SQLXML) - */ - - public void setSQLXML(String parameterName, SQLXML xmlObject) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setShort(java.lang.String, short) - */ - - public void setShort(String parameterName, short x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setString(java.lang.String, - * java.lang.String) - */ - - public void setString(String parameterName, String x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setTime(java.lang.String, java.sql.Time) - */ - - public void setTime(String parameterName, Time x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setTime(java.lang.String, java.sql.Time, - * java.util.Calendar) - */ - - public void setTime(String parameterName, Time x, Calendar cal) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setTimestamp(java.lang.String, - * java.sql.Timestamp) - */ - - public void setTimestamp(String parameterName, Timestamp x) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setTimestamp(java.lang.String, - * java.sql.Timestamp, java.util.Calendar) - */ - - public void setTimestamp(String parameterName, Timestamp x, Calendar cal) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#setURL(java.lang.String, java.net.URL) - */ - - public void setURL(String parameterName, URL val) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.CallableStatement#wasNull() - */ - - public boolean wasNull() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see 
java.sql.PreparedStatement#addBatch() - */ - - public void addBatch() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#clearParameters() - */ - - public void clearParameters() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#execute() - */ - - public boolean execute() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#executeQuery() - */ - - public ResultSet executeQuery() throws SQLException { - // TODO Auto-generated method stub - return new HiveQueryResultSet(null); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#executeUpdate() - */ - - public int executeUpdate() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#getMetaData() - */ - - public ResultSetMetaData getMetaData() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#getParameterMetaData() - */ - - public ParameterMetaData getParameterMetaData() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setArray(int, java.sql.Array) - */ - - public void setArray(int i, Array x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setAsciiStream(int, java.io.InputStream) - */ - - public void setAsciiStream(int arg0, InputStream arg1) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setAsciiStream(int, java.io.InputStream, - * int) - */ - - public void setAsciiStream(int parameterIndex, InputStream x, int length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setAsciiStream(int, java.io.InputStream, - * long) - */ - - public void setAsciiStream(int arg0, InputStream arg1, long arg2) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBigDecimal(int, java.math.BigDecimal) - */ - - public void setBigDecimal(int parameterIndex, BigDecimal x) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBinaryStream(int, java.io.InputStream) - */ - - public void setBinaryStream(int parameterIndex, InputStream x) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBinaryStream(int, java.io.InputStream, - * int) - */ - - public void setBinaryStream(int parameterIndex, InputStream x, int length) - throws SQLException { 
- // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBinaryStream(int, java.io.InputStream, - * long) - */ - - public void setBinaryStream(int parameterIndex, InputStream x, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBlob(int, java.sql.Blob) - */ - - public void setBlob(int i, Blob x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBlob(int, java.io.InputStream) - */ - - public void setBlob(int parameterIndex, InputStream inputStream) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBlob(int, java.io.InputStream, long) - */ - - public void setBlob(int parameterIndex, InputStream inputStream, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBoolean(int, boolean) - */ - - public void setBoolean(int parameterIndex, boolean x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setByte(int, byte) - */ - - public void setByte(int parameterIndex, byte x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBytes(int, byte[]) - */ - - public void setBytes(int parameterIndex, byte[] x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setCharacterStream(int, java.io.Reader) - */ - - public void setCharacterStream(int parameterIndex, Reader reader) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setCharacterStream(int, java.io.Reader, - * int) - */ - - public void setCharacterStream(int parameterIndex, Reader reader, int length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setCharacterStream(int, java.io.Reader, - * long) - */ - - public void setCharacterStream(int parameterIndex, Reader reader, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setClob(int, java.sql.Clob) - */ - - public void setClob(int i, Clob x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setClob(int, java.io.Reader) - */ - - public void setClob(int parameterIndex, Reader reader) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see 
java.sql.PreparedStatement#setClob(int, java.io.Reader, long) - */ - - public void setClob(int parameterIndex, Reader reader, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setDate(int, java.sql.Date) - */ - - public void setDate(int parameterIndex, Date x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setDate(int, java.sql.Date, - * java.util.Calendar) - */ - - public void setDate(int parameterIndex, Date x, Calendar cal) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setDouble(int, double) - */ - - public void setDouble(int parameterIndex, double x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setFloat(int, float) - */ - - public void setFloat(int parameterIndex, float x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setInt(int, int) - */ - - public void setInt(int parameterIndex, int x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setLong(int, long) - */ - - public void setLong(int parameterIndex, long x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNCharacterStream(int, java.io.Reader) - */ - - public void setNCharacterStream(int parameterIndex, Reader value) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNCharacterStream(int, java.io.Reader, - * long) - */ - - public void setNCharacterStream(int parameterIndex, Reader value, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNClob(int, java.sql.NClob) - */ - - public void setNClob(int parameterIndex, NClob value) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNClob(int, java.io.Reader) - */ - - public void setNClob(int parameterIndex, Reader reader) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNClob(int, java.io.Reader, long) - */ - - public void setNClob(int parameterIndex, Reader reader, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNString(int, java.lang.String) - */ - - public void setNString(int parameterIndex, String value) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method 
not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNull(int, int) - */ - - public void setNull(int parameterIndex, int sqlType) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNull(int, int, java.lang.String) - */ - - public void setNull(int paramIndex, int sqlType, String typeName) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setObject(int, java.lang.Object) - */ - - public void setObject(int parameterIndex, Object x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setObject(int, java.lang.Object, int) - */ - - public void setObject(int parameterIndex, Object x, int targetSqlType) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setObject(int, java.lang.Object, int, int) - */ - - public void setObject(int parameterIndex, Object x, int targetSqlType, - int scale) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setRef(int, java.sql.Ref) - */ - - public void setRef(int i, Ref x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setRowId(int, java.sql.RowId) - */ - - public void setRowId(int parameterIndex, RowId x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setSQLXML(int, java.sql.SQLXML) - */ - - public void setSQLXML(int parameterIndex, SQLXML xmlObject) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setShort(int, short) - */ - - public void setShort(int parameterIndex, short x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setString(int, java.lang.String) - */ - - public void setString(int parameterIndex, String x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setTime(int, java.sql.Time) - */ - - public void setTime(int parameterIndex, Time x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setTime(int, java.sql.Time, - * java.util.Calendar) - */ - - public void setTime(int parameterIndex, Time x, Calendar cal) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setTimestamp(int, java.sql.Timestamp) - */ - - public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { - // TODO 
Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setTimestamp(int, java.sql.Timestamp, - * java.util.Calendar) - */ - - public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setURL(int, java.net.URL) - */ - - public void setURL(int parameterIndex, URL x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setUnicodeStream(int, java.io.InputStream, - * int) - */ - - public void setUnicodeStream(int parameterIndex, InputStream x, int length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#addBatch(java.lang.String) - */ - - public void addBatch(String sql) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#cancel() - */ - - public void cancel() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#clearBatch() - */ - - public void clearBatch() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#clearWarnings() - */ - - public void clearWarnings() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#close() - */ - - public void close() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - public void closeOnCompletion() throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - public boolean isCloseOnCompletion() throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String) - */ - - public boolean execute(String sql) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String, int) - */ - - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String, int[]) - */ - - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String, java.lang.String[]) - */ - - public boolean execute(String sql, String[] columnNames) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeBatch() - */ - - public int[] executeBatch() throws SQLException { - // TODO Auto-generated method 
stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeQuery(java.lang.String) - */ - - public ResultSet executeQuery(String sql) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String) - */ - - public int executeUpdate(String sql) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String, int) - */ - - public int executeUpdate(String sql, int autoGeneratedKeys) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String, int[]) - */ - - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String, java.lang.String[]) - */ - - public int executeUpdate(String sql, String[] columnNames) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getConnection() - */ - - public Connection getConnection() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getFetchDirection() - */ - - public int getFetchDirection() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getFetchSize() - */ - - public int getFetchSize() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getGeneratedKeys() - */ - - public ResultSet getGeneratedKeys() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMaxFieldSize() - */ - - public int getMaxFieldSize() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMaxRows() - */ - - public int getMaxRows() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMoreResults() - */ - - public boolean getMoreResults() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMoreResults(int) - */ - - public boolean getMoreResults(int current) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getQueryTimeout() - */ - - public int getQueryTimeout() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSet() - */ - - public ResultSet 
getResultSet() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSetConcurrency() - */ - - public int getResultSetConcurrency() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSetHoldability() - */ - - public int getResultSetHoldability() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSetType() - */ - - public int getResultSetType() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getUpdateCount() - */ - - public int getUpdateCount() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getWarnings() - */ - - public SQLWarning getWarnings() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#isClosed() - */ - - public boolean isClosed() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#isPoolable() - */ - - public boolean isPoolable() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setCursorName(java.lang.String) - */ - - public void setCursorName(String name) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setEscapeProcessing(boolean) - */ - - public void setEscapeProcessing(boolean enable) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setFetchDirection(int) - */ - - public void setFetchDirection(int direction) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setFetchSize(int) - */ - - public void setFetchSize(int rows) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setMaxFieldSize(int) - */ - - public void setMaxFieldSize(int max) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setMaxRows(int) - */ - - public void setMaxRows(int max) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setPoolable(boolean) - */ - - public void setPoolable(boolean arg0) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setQueryTimeout(int) - */ - - public void setQueryTimeout(int seconds) throws 
SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Wrapper#isWrapperFor(java.lang.Class) - */ - - public boolean isWrapperFor(Class iface) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Wrapper#unwrap(java.lang.Class) - */ - - public T unwrap(Class iface) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - -} diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java index 59ce692..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveConnection.java @@ -1,708 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.jdbc; - -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.service.HiveClient; -import org.apache.hadoop.hive.service.HiveInterface; -import org.apache.hadoop.hive.service.HiveServer; -import org.apache.thrift.TException; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.protocol.TProtocol; -import org.apache.thrift.transport.TSocket; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportException; -import java.util.concurrent.Executor; - -import java.sql.Array; -import java.sql.Blob; -import java.sql.CallableStatement; -import java.sql.Clob; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.NClob; -import java.sql.PreparedStatement; -import java.sql.SQLClientInfoException; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Savepoint; -import java.sql.Statement; -import java.sql.Struct; -import java.util.Map; -import java.util.Properties; - -/** - * HiveConnection. 
- * - */ -public class HiveConnection implements java.sql.Connection { - private TTransport transport; - private HiveInterface client; - private boolean isClosed = true; - private SQLWarning warningChain = null; - - private static final String URI_PREFIX = "jdbc:hive://"; - - /** - * Create a connection to a local Hive - * - * @param hiveConf - * @throws SQLException - */ - public HiveConnection(HiveConf hiveConf) throws SQLException { - try { - client = new HiveServer.HiveServerHandler(hiveConf); - } catch (MetaException e) { - throw new SQLException("Error accessing Hive metastore: " - + e.getMessage(), "08S01",e); - } - isClosed = false; - configureConnection(); - } - - /** - * TODO: - parse uri (use java.net.URI?). - */ - public HiveConnection(String uri, Properties info) throws SQLException { - if (!uri.startsWith(URI_PREFIX)) { - throw new SQLException("Invalid URL: " + uri, "08S01"); - } - - // remove prefix - uri = uri.substring(URI_PREFIX.length()); - - // If uri is not specified, use local mode. - if (uri.isEmpty()) { - try { - client = new HiveServer.HiveServerHandler(); - } catch (MetaException e) { - throw new SQLException("Error accessing Hive metastore: " - + e.getMessage(), "08S01",e); - } - } else { - // parse uri - // form: hostname:port/databasename - String[] parts = uri.split("/"); - String[] hostport = parts[0].split(":"); - int port = 10000; - String host = hostport[0]; - try { - port = Integer.parseInt(hostport[1]); - } catch (Exception e) { - } - transport = new TSocket(host, port); - TProtocol protocol = new TBinaryProtocol(transport); - client = new HiveClient(protocol); - try { - transport.open(); - } catch (TTransportException e) { - throw new SQLException("Could not establish connection to " - + uri + ": " + e.getMessage(), "08S01"); - } - } - isClosed = false; - configureConnection(); - } - - public void abort(Executor executor) throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - private void configureConnection() throws SQLException { - Statement stmt = createStatement(); - stmt.execute( - "set hive.fetch.output.serde = org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"); - stmt.close(); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#clearWarnings() - */ - - public void clearWarnings() throws SQLException { - warningChain = null; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#close() - */ - - public void close() throws SQLException { - if (!isClosed) { - try { - client.clean(); - } catch (TException e) { - throw new SQLException("Error while cleaning up the server resources", e); - } finally { - isClosed = true; - if (transport != null) { - transport.close(); - } - } - } - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#commit() - */ - - public void commit() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#createArrayOf(java.lang.String, - * java.lang.Object[]) - */ - - public Array createArrayOf(String arg0, Object[] arg1) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#createBlob() - */ - - public Blob createBlob() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#createClob() - */ - - public Clob createClob() 
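The HiveConnection removed by this patch accepted URLs of the form jdbc:hive://<host>:<port>/<database>, defaulted the port to 10000 when none could be parsed, and fell back to an embedded in-process HiveServer when the authority was empty. A minimal usage sketch of that old HiveServer1 driver follows; the SHOW TABLES query, the target localhost:10000, and the statement/result-set handling are illustrative assumptions rather than code taken from this patch.

// Sketch only: assumes a HiveServer1 instance on localhost:10000 and that the
// removed org.apache.hadoop.hive.jdbc.HiveDriver is on the classpath.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class LegacyHiveConnectionExample {
  public static void main(String[] args) throws Exception {
    // URL format handled by the removed HiveConnection:
    // jdbc:hive://<host>:<port>/<database>; the port defaults to 10000, and an
    // empty authority ("jdbc:hive://") selects the embedded in-process server.
    Class.forName("org.apache.hadoop.hive.jdbc.HiveDriver");
    Connection conn =
        DriverManager.getConnection("jdbc:hive://localhost:10000/default");
    Statement stmt = conn.createStatement();          // backed by the old HiveStatement
    ResultSet rs = stmt.executeQuery("SHOW TABLES");  // illustrative query
    while (rs.next()) {
      System.out.println(rs.getString(1));
    }
    rs.close();
    stmt.close();
    conn.close();   // closes the underlying Thrift transport
  }
}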
throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#createNClob() - */ - - public NClob createNClob() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#createSQLXML() - */ - - public SQLXML createSQLXML() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /** - * Creates a Statement object for sending SQL statements to the database. - * - * @throws SQLException - * if a database access error occurs. - * @see java.sql.Connection#createStatement() - */ - - public Statement createStatement() throws SQLException { - if (isClosed) { - throw new SQLException("Can't create Statement, connection is closed"); - } - return new HiveStatement(client); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#createStatement(int, int) - */ - - public Statement createStatement(int resultSetType, int resultSetConcurrency) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#createStatement(int, int, int) - */ - - public Statement createStatement(int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#createStruct(java.lang.String, java.lang.Object[]) - */ - - public Struct createStruct(String typeName, Object[] attributes) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#getAutoCommit() - */ - - public boolean getAutoCommit() throws SQLException { - return true; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#getCatalog() - */ - - public String getCatalog() throws SQLException { - return ""; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#getClientInfo() - */ - - public Properties getClientInfo() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#getClientInfo(java.lang.String) - */ - - public String getClientInfo(String name) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#getHoldability() - */ - - public int getHoldability() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#getMetaData() - */ - - public DatabaseMetaData getMetaData() throws SQLException { - return new HiveDatabaseMetaData(client); - } - - - public int getNetworkTimeout() throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - - public String getSchema() throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - /* - * (non-Javadoc) - * - * @see java.sql.Connection#getTransactionIsolation() - */ - - public int getTransactionIsolation() throws SQLException { - return Connection.TRANSACTION_NONE; - } - - /* - * (non-Javadoc) - * - * @see 
java.sql.Connection#getTypeMap() - */ - - public Map> getTypeMap() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#getWarnings() - */ - - public SQLWarning getWarnings() throws SQLException { - return warningChain; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#isClosed() - */ - - public boolean isClosed() throws SQLException { - return isClosed; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#isReadOnly() - */ - - public boolean isReadOnly() throws SQLException { - return false; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#isValid(int) - */ - - public boolean isValid(int timeout) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#nativeSQL(java.lang.String) - */ - - public String nativeSQL(String sql) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#prepareCall(java.lang.String) - */ - - public CallableStatement prepareCall(String sql) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#prepareCall(java.lang.String, int, int) - */ - - public CallableStatement prepareCall(String sql, int resultSetType, - int resultSetConcurrency) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#prepareCall(java.lang.String, int, int, int) - */ - - public CallableStatement prepareCall(String sql, int resultSetType, - int resultSetConcurrency, int resultSetHoldability) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#prepareStatement(java.lang.String) - */ - - public PreparedStatement prepareStatement(String sql) throws SQLException { - return new HivePreparedStatement(client, sql); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#prepareStatement(java.lang.String, int) - */ - - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) - throws SQLException { - return new HivePreparedStatement(client, sql); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#prepareStatement(java.lang.String, int[]) - */ - - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#prepareStatement(java.lang.String, - * java.lang.String[]) - */ - - public PreparedStatement prepareStatement(String sql, String[] columnNames) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#prepareStatement(java.lang.String, int, int) - */ - - public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency) throws SQLException { - return new HivePreparedStatement(client, sql); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#prepareStatement(java.lang.String, int, int, int) - */ - - 
public PreparedStatement prepareStatement(String sql, int resultSetType, - int resultSetConcurrency, int resultSetHoldability) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#releaseSavepoint(java.sql.Savepoint) - */ - - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#rollback() - */ - - public void rollback() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#rollback(java.sql.Savepoint) - */ - - public void rollback(Savepoint savepoint) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#setAutoCommit(boolean) - */ - - public void setAutoCommit(boolean autoCommit) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#setCatalog(java.lang.String) - */ - - public void setCatalog(String catalog) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#setClientInfo(java.util.Properties) - */ - - public void setClientInfo(Properties properties) - throws SQLClientInfoException { - // TODO Auto-generated method stub - throw new SQLClientInfoException("Method not supported", null); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#setClientInfo(java.lang.String, java.lang.String) - */ - - public void setClientInfo(String name, String value) - throws SQLClientInfoException { - // TODO Auto-generated method stub - throw new SQLClientInfoException("Method not supported", null); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#setHoldability(int) - */ - - public void setHoldability(int holdability) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#setReadOnly(boolean) - */ - - public void setReadOnly(boolean readOnly) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#setSavepoint() - */ - - public Savepoint setSavepoint() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#setSavepoint(java.lang.String) - */ - - public Savepoint setSavepoint(String name) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - public void setSchema(String schema) throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#setTransactionIsolation(int) - */ - - public void setTransactionIsolation(int level) throws SQLException { - // TODO Auto-generated method stub - throw new 
SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Connection#setTypeMap(java.util.Map) - */ - - public void setTypeMap(Map> map) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Wrapper#isWrapperFor(java.lang.Class) - */ - - public boolean isWrapperFor(Class iface) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - public T unwrap(Class iface) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } -} - diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java index f016d74..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDataSource.java @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.jdbc; - -import java.io.PrintWriter; -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; -import java.util.logging.Logger; - -import javax.sql.DataSource; - -/** - * HiveDataSource. 
- * - */ -public class HiveDataSource implements DataSource { - - /** - * - */ - public HiveDataSource() { - // TODO Auto-generated constructor stub - } - - /* - * (non-Javadoc) - * - * @see javax.sql.DataSource#getConnection() - */ - - public Connection getConnection() throws SQLException { - return getConnection("", ""); - } - - /* - * (non-Javadoc) - * - * @see javax.sql.DataSource#getConnection(java.lang.String, java.lang.String) - */ - - public Connection getConnection(String username, String password) - throws SQLException { - try { - return new HiveConnection("", null); - } catch (Exception ex) { - throw new SQLException("Error in getting HiveConnection",ex); - } - } - - /* - * (non-Javadoc) - * - * @see javax.sql.CommonDataSource#getLogWriter() - */ - - public PrintWriter getLogWriter() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see javax.sql.CommonDataSource#getLoginTimeout() - */ - - public int getLoginTimeout() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see javax.sql.CommonDataSource#setLogWriter(java.io.PrintWriter) - */ - - public Logger getParentLogger() throws SQLFeatureNotSupportedException { - // JDK 1.7 - throw new SQLFeatureNotSupportedException("Method not supported"); - } - - - public void setLogWriter(PrintWriter arg0) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see javax.sql.CommonDataSource#setLoginTimeout(int) - */ - - public void setLoginTimeout(int arg0) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Wrapper#isWrapperFor(java.lang.Class) - */ - - public boolean isWrapperFor(Class arg0) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Wrapper#unwrap(java.lang.Class) - */ - - public T unwrap(Class arg0) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - -} - diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDatabaseMetaData.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDatabaseMetaData.java index bda3e0d..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDatabaseMetaData.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDatabaseMetaData.java @@ -1,1174 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
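The HiveDataSource removed here routed both getConnection() overloads to new HiveConnection("", null), that is, an embedded in-process connection that ignores the supplied username and password. A brief sketch of that behavior follows, assuming the embedded server can be created against a locally configured metastore; the class names come from the deleted org.apache.hadoop.hive.jdbc package.

import java.sql.Connection;
import javax.sql.DataSource;
import org.apache.hadoop.hive.jdbc.HiveDataSource;

public class LegacyHiveDataSourceExample {
  public static void main(String[] args) throws Exception {
    DataSource ds = new HiveDataSource();
    // Credentials were accepted but not used by the removed implementation;
    // every call produced an embedded connection, equivalent to "jdbc:hive://".
    try (Connection conn = ds.getConnection("ignored-user", "ignored-password")) {
      System.out.println("auto-commit: " + conn.getAutoCommit()); // always true in the old driver
    }
  }
}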
- */ - -package org.apache.hadoop.hive.jdbc; - -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.RowIdLifetime; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.jar.Attributes; - -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.service.HiveInterface; -import org.apache.thrift.TException; - -/** - * HiveDatabaseMetaData. - * - */ -public class HiveDatabaseMetaData implements java.sql.DatabaseMetaData { - - private final HiveInterface client; - private static final String CATALOG_SEPARATOR = "."; - - private static final char SEARCH_STRING_ESCAPE = '\\'; - - // The maximum column length = MFieldSchema.FNAME in metastore/src/model/package.jdo - private static final int maxColumnNameLength = 128; - - /** - * - */ - public HiveDatabaseMetaData(HiveInterface client) { - this.client = client; - } - - public boolean allProceduresAreCallable() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean allTablesAreSelectable() throws SQLException { - return true; - } - - public boolean autoCommitFailureClosesAllResultSets() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean dataDefinitionCausesTransactionCommit() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean dataDefinitionIgnoredInTransactions() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean deletesAreDetected(int type) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean doesMaxRowSizeIncludeBlobs() throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getAttributes(String catalog, String schemaPattern, - String typeNamePattern, String attributeNamePattern) throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getBestRowIdentifier(String catalog, String schema, - String table, int scope, boolean nullable) throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getCatalogSeparator() throws SQLException { - return CATALOG_SEPARATOR; - } - - public String getCatalogTerm() throws SQLException { - return "database"; - } - - public ResultSet getCatalogs() throws SQLException { - try { - // TODO a client call to get the schema's after HIVE-675 is implemented - final List catalogs = new ArrayList(); - catalogs.add("default"); - return new HiveMetaDataResultSet(Arrays.asList("TABLE_CAT") - , Arrays.asList("STRING") - , catalogs) { - private int cnt = 0; - - public boolean next() throws SQLException { - if (cnt a = new ArrayList(1); - a.add(data.get(cnt)); // TABLE_CAT String => table catalog (may be null) - row = a; - cnt++; - return true; - } else { - return false; - } - } - - public T getObject(String columnLabel, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - public T getObject(int columnIndex, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - }; - } catch (Exception e) { - throw new SQLException(e); - } - } - - public ResultSet getClientInfoProperties() throws 
SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getColumnPrivileges(String catalog, String schema, - String table, String columnNamePattern) throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getPseudoColumns(String catalog, String schemaPattern, - String tableNamePattern, String columnNamePattern) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean generatedKeyAlwaysReturned() throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - - /** - * Convert a pattern containing JDBC catalog search wildcards into - * Java regex patterns. - * - * @param pattern input which may contain '%' or '_' wildcard characters, or - * these characters escaped using {@link #getSearchStringEscape()}. - * @return replace %/_ with regex search characters, also handle escaped - * characters. - */ - private String convertPattern(final String pattern) { - if (pattern==null) { - return ".*"; - } else { - StringBuilder result = new StringBuilder(pattern.length()); - - boolean escaped = false; - for (int i = 0, len = pattern.length(); i < len; i++) { - char c = pattern.charAt(i); - if (escaped) { - if (c != SEARCH_STRING_ESCAPE) { - escaped = false; - } - result.append(c); - } else { - if (c == SEARCH_STRING_ESCAPE) { - escaped = true; - continue; - } else if (c == '%') { - result.append(".*"); - } else if (c == '_') { - result.append('.'); - } else { - result.append(Character.toLowerCase(c)); - } - } - } - - return result.toString(); - } - } - - public ResultSet getColumns(String catalog, final String schemaPattern - , final String tableNamePattern - , final String columnNamePattern) throws SQLException { - List columns = new ArrayList(); - try { - if (catalog==null) { - catalog = "default"; - } - - String regtableNamePattern = convertPattern(tableNamePattern); - String regcolumnNamePattern = convertPattern(columnNamePattern); - - List tables = client.get_tables(catalog, "*"); - for (String table: tables) { - if (table.matches(regtableNamePattern)) { - List fields = client.get_schema(catalog, table); - int ordinalPos = 1; - for (FieldSchema field: fields) { - if (field.getName().matches(regcolumnNamePattern)) { - columns.add(new JdbcColumn(field.getName(), table, catalog - , field.getType(), field.getComment(), ordinalPos)); - ordinalPos++; - } - } - } - } - Collections.sort(columns, new GetColumnsComparator()); - - return new HiveMetaDataResultSet( - Arrays.asList("TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME", "DATA_TYPE" - , "TYPE_NAME", "COLUMN_SIZE", "BUFFER_LENGTH", "DECIMAL_DIGITS" - , "NUM_PREC_RADIX", "NULLABLE", "REMARKS", "COLUMN_DEF", "SQL_DATA_TYPE" - , "SQL_DATETIME_SUB", "CHAR_OCTET_LENGTH", "ORDINAL_POSITION" - , "IS_NULLABLE", "SCOPE_CATLOG", "SCOPE_SCHEMA", "SCOPE_TABLE" - , "SOURCE_DATA_TYPE") - , Arrays.asList("STRING", "STRING", "STRING", "STRING", "INT", "STRING" - , "INT", "INT", "INT", "INT", "INT", "STRING" - , "STRING", "INT", "INT", "INT", "INT" - , "STRING", "STRING", "STRING", "STRING", "INT") - , columns) { - - private int cnt = 0; - - public boolean next() throws SQLException { - if (cnt a = new ArrayList(20); - JdbcColumn column = data.get(cnt); - a.add(column.getTableCatalog()); // TABLE_CAT String => table catalog (may be null) - a.add(null); // TABLE_SCHEM String => table schema (may be null) - a.add(column.getTableName()); // TABLE_NAME String => table name - a.add(column.getColumnName()); // COLUMN_NAME 
String => column name - a.add(column.getSqlType()); // DATA_TYPE short => SQL type from java.sql.Types - a.add(column.getType()); // TYPE_NAME String => Data source dependent type name. - a.add(column.getColumnSize()); // COLUMN_SIZE int => column size. - a.add(null); // BUFFER_LENGTH is not used. - a.add(column.getDecimalDigits()); // DECIMAL_DIGITS int => number of fractional digits - a.add(column.getNumPrecRadix()); // NUM_PREC_RADIX int => typically either 10 or 2 - a.add(DatabaseMetaData.columnNullable); // NULLABLE int => is NULL allowed? - a.add(column.getComment()); // REMARKS String => comment describing column (may be null) - a.add(null); // COLUMN_DEF String => default value (may be null) - a.add(null); // SQL_DATA_TYPE int => unused - a.add(null); // SQL_DATETIME_SUB int => unused - a.add(null); // CHAR_OCTET_LENGTH int - a.add(column.getOrdinalPos()); // ORDINAL_POSITION int - a.add("YES"); // IS_NULLABLE String - a.add(null); // SCOPE_CATLOG String - a.add(null); // SCOPE_SCHEMA String - a.add(null); // SCOPE_TABLE String - a.add(null); // SOURCE_DATA_TYPE short - row = a; - cnt++; - return true; - } else { - return false; - } - } - - public T getObject(String columnLabel, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - public T getObject(int columnIndex, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - }; - } catch (Exception e) { - throw new SQLException(e); - } - } - - /** - * We sort the output of getColumns to guarantee jdbc compliance. - * First check by table name then by ordinal position - */ - private class GetColumnsComparator implements Comparator { - - public int compare(JdbcColumn o1, JdbcColumn o2) { - int compareName = o1.getTableName().compareTo(o2.getTableName()); - if (compareName==0) { - if (o1.getOrdinalPos() > o2.getOrdinalPos()) { - return 1; - } else if (o1.getOrdinalPos() < o2.getOrdinalPos()) { - return -1; - } - return 0; - } else { - return compareName; - } - } - } - - public Connection getConnection() throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getCrossReference(String primaryCatalog, - String primarySchema, String primaryTable, String foreignCatalog, - String foreignSchema, String foreignTable) throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getDatabaseMajorVersion() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getDatabaseMinorVersion() throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getDatabaseProductName() throws SQLException { - return "Hive"; - } - - public String getDatabaseProductVersion() throws SQLException { - try { - return client.getVersion(); - } catch (TException e) { - throw new SQLException(e); - } - } - - public int getDefaultTransactionIsolation() throws SQLException { - return Connection.TRANSACTION_NONE; - } - - public int getDriverMajorVersion() { - return HiveDriver.getMajorDriverVersion(); - } - - public int getDriverMinorVersion() { - return HiveDriver.getMinorDriverVersion(); - } - - public String getDriverName() throws SQLException { - return HiveDriver.fetchManifestAttribute(Attributes.Name.IMPLEMENTATION_TITLE); - } - - public String getDriverVersion() throws SQLException { - return HiveDriver.fetchManifestAttribute(Attributes.Name.IMPLEMENTATION_VERSION); - } - - public ResultSet getExportedKeys(String catalog, String 
schema, String table) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getExtraNameCharacters() throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getFunctionColumns(String arg0, String arg1, String arg2, - String arg3) throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getFunctions(String arg0, String arg1, String arg2) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getIdentifierQuoteString() throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getImportedKeys(String catalog, String schema, String table) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getIndexInfo(String catalog, String schema, String table, - boolean unique, boolean approximate) throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getJDBCMajorVersion() throws SQLException { - return 3; - } - - public int getJDBCMinorVersion() throws SQLException { - return 0; - } - - public int getMaxBinaryLiteralLength() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxCatalogNameLength() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxCharLiteralLength() throws SQLException { - throw new SQLException("Method not supported"); - } - - /** - * Returns the value of maxColumnNameLength. - * - */ - public int getMaxColumnNameLength() throws SQLException { - return maxColumnNameLength; - } - - public int getMaxColumnsInGroupBy() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxColumnsInIndex() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxColumnsInOrderBy() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxColumnsInSelect() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxColumnsInTable() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxConnections() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxCursorNameLength() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxIndexLength() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxProcedureNameLength() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxRowSize() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxSchemaNameLength() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxStatementLength() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxStatements() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxTableNameLength() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxTablesInSelect() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getMaxUserNameLength() throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getNumericFunctions() throws 
SQLException { - return ""; - } - - public ResultSet getPrimaryKeys(String catalog, String schema, String table) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getProcedureColumns(String catalog, String schemaPattern, - String procedureNamePattern, String columnNamePattern) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getProcedureTerm() throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getProcedures(String catalog, String schemaPattern, - String procedureNamePattern) throws SQLException { - return null; - } - - public int getResultSetHoldability() throws SQLException { - throw new SQLException("Method not supported"); - } - - public RowIdLifetime getRowIdLifetime() throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getSQLKeywords() throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getSQLStateType() throws SQLException { - return DatabaseMetaData.sqlStateSQL99; - } - - public String getSchemaTerm() throws SQLException { - return ""; - } - - public ResultSet getSchemas() throws SQLException { - return getSchemas(null, null); - } - - public ResultSet getSchemas(String catalog, String schemaPattern) - throws SQLException { - return new HiveMetaDataResultSet(Arrays.asList("TABLE_SCHEM", "TABLE_CATALOG") - , Arrays.asList("STRING", "STRING"), null) { - - public boolean next() throws SQLException { - return false; - } - - public T getObject(String columnLabel, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - public T getObject(int columnIndex, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - }; - - } - - public String getSearchStringEscape() throws SQLException { - return String.valueOf(SEARCH_STRING_ESCAPE); - } - - public String getStringFunctions() throws SQLException { - return ""; - } - - public ResultSet getSuperTables(String catalog, String schemaPattern, - String tableNamePattern) throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getSuperTypes(String catalog, String schemaPattern, - String typeNamePattern) throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getSystemFunctions() throws SQLException { - return ""; - } - - public ResultSet getTablePrivileges(String catalog, String schemaPattern, - String tableNamePattern) throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getTableTypes() throws SQLException { - final TableType[] tt = TableType.values(); - ResultSet result = new HiveMetaDataResultSet( - Arrays.asList("TABLE_TYPE") - , Arrays.asList("STRING"), new ArrayList(Arrays.asList(tt))) { - private int cnt = 0; - - public boolean next() throws SQLException { - if (cnt a = new ArrayList(1); - a.add(toJdbcTableType(data.get(cnt).name())); - row = a; - cnt++; - return true; - } else { - return false; - } - } - - public T getObject(String columnLabel, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - public T getObject(int columnIndex, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - }; - return result; - } - - public ResultSet getTables(String catalog, String schemaPattern, - String tableNamePattern, 
String[] types) throws SQLException { - final List tablesstr; - final List resultTables = new ArrayList(); - final String resultCatalog; - if (catalog==null) { // On jdbc the default catalog is null but on hive it's "default" - resultCatalog = "default"; - } else { - resultCatalog = catalog; - } - - String regtableNamePattern = convertPattern(tableNamePattern); - try { - tablesstr = client.get_tables(resultCatalog, "*"); - for (String tablestr: tablesstr) { - if (tablestr.matches(regtableNamePattern)) { - Table tbl = client.get_table(resultCatalog, tablestr); - if (types == null) { - resultTables.add(new JdbcTable(resultCatalog, tbl.getTableName(), tbl.getTableType() - , tbl.getParameters().get("comment"))); - } else { - String tableType = toJdbcTableType(tbl.getTableType()); - for(String type : types) { - if (type.equalsIgnoreCase(tableType)) { - resultTables.add(new JdbcTable(resultCatalog, tbl.getTableName(), tbl.getTableType() - , tbl.getParameters().get("comment"))); - break; - } - } - } - } - } - Collections.sort(resultTables, new GetTablesComparator()); - } catch (Exception e) { - throw new SQLException(e); - } - ResultSet result = new HiveMetaDataResultSet( - Arrays.asList("TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "TABLE_TYPE", "REMARKS") - , Arrays.asList("STRING", "STRING", "STRING", "STRING", "STRING") - , resultTables) { - private int cnt = 0; - - public boolean next() throws SQLException { - if (cnt a = new ArrayList(5); - JdbcTable table = data.get(cnt); - a.add(table.getTableCatalog()); // TABLE_CAT String => table catalog (may be null) - a.add(null); // TABLE_SCHEM String => table schema (may be null) - a.add(table.getTableName()); // TABLE_NAME String => table name - try { - a.add(table.getSqlTableType()); // TABLE_TYPE String => "TABLE","VIEW" - } catch (Exception e) { - throw new SQLException(e); - } - a.add(table.getComment()); // REMARKS String => explanatory comment on the table - row = a; - cnt++; - return true; - } else { - return false; - } - } - - public T getObject(String columnLabel, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - public T getObject(int columnIndex, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - }; - return result; - } - - /** - * We sort the output of getTables to guarantee jdbc compliance. - * First check by table type then by table name - */ - private class GetTablesComparator implements Comparator { - - public int compare(JdbcTable o1, JdbcTable o2) { - int compareType = o1.getType().compareTo(o2.getType()); - if (compareType==0) { - return o1.getTableName().compareTo(o2.getTableName()); - } else { - return compareType; - } - } - } - - /** - * Translate hive table types into jdbc table types. 
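[Editor's note] The getColumns() and getTables() implementations above filter metastore results on the client: the JDBC search pattern is converted into a Java regex and matched against every table or column name returned by the Thrift client. A standalone sketch of that conversion rule ('%' becomes ".*", '_' becomes '.', backslash is the escape character, everything else is lower-cased), using a few hypothetical patterns; the class name is illustrative:

public class WildcardToRegexSketch {
  // Mirrors the private convertPattern() helper shown above.
  static String convertPattern(String pattern) {
    if (pattern == null) {
      return ".*";                       // null means "match everything"
    }
    StringBuilder result = new StringBuilder(pattern.length());
    boolean escaped = false;
    for (int i = 0; i < pattern.length(); i++) {
      char c = pattern.charAt(i);
      if (escaped) {
        if (c != '\\') {
          escaped = false;
        }
        result.append(c);                // an escaped '%' or '_' is kept literally
      } else if (c == '\\') {
        escaped = true;                  // getSearchStringEscape() is a backslash
      } else if (c == '%') {
        result.append(".*");
      } else if (c == '_') {
        result.append('.');
      } else {
        result.append(Character.toLowerCase(c));
      }
    }
    return result.toString();
  }

  public static void main(String[] args) {
    System.out.println(convertPattern("tab_le%"));    // tab.le.*
    System.out.println(convertPattern("tab\\_le%"));  // tab_le.*
    System.out.println(convertPattern(null));         // .*
  }
}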
- * @param hivetabletype - * @return the type of the table - */ - public static String toJdbcTableType(String hivetabletype) { - if (hivetabletype==null) { - return null; - } else if (hivetabletype.equals(TableType.MANAGED_TABLE.toString())) { - return "TABLE"; - } else if (hivetabletype.equals(TableType.VIRTUAL_VIEW.toString())) { - return "VIEW"; - } else if (hivetabletype.equals(TableType.EXTERNAL_TABLE.toString())) { - return "EXTERNAL TABLE"; - } else { - return hivetabletype; - } - } - - public String getTimeDateFunctions() throws SQLException { - return ""; - } - - public ResultSet getTypeInfo() throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getUDTs(String catalog, String schemaPattern, - String typeNamePattern, int[] types) throws SQLException { - - return new HiveMetaDataResultSet( - Arrays.asList("TYPE_CAT", "TYPE_SCHEM", "TYPE_NAME", "CLASS_NAME", "DATA_TYPE" - , "REMARKS", "BASE_TYPE") - , Arrays.asList("STRING", "STRING", "STRING", "STRING", "INT", "STRING", "INT") - , null) { - - public boolean next() throws SQLException { - return false; - } - - public T getObject(String columnLabel, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - public T getObject(int columnIndex, Class type) - throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - }; - } - - public String getURL() throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getUserName() throws SQLException { - throw new SQLException("Method not supported"); - } - - public ResultSet getVersionColumns(String catalog, String schema, String table) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean insertsAreDetected(int type) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isCatalogAtStart() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isReadOnly() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean locatorsUpdateCopy() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean nullPlusNonNullIsNull() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean nullsAreSortedAtEnd() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean nullsAreSortedAtStart() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean nullsAreSortedHigh() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean nullsAreSortedLow() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean othersDeletesAreVisible(int type) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean othersInsertsAreVisible(int type) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean othersUpdatesAreVisible(int type) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean ownDeletesAreVisible(int type) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean ownInsertsAreVisible(int type) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean ownUpdatesAreVisible(int type) throws SQLException { - 
throw new SQLException("Method not supported"); - } - - public boolean storesLowerCaseIdentifiers() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean storesLowerCaseQuotedIdentifiers() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean storesMixedCaseIdentifiers() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean storesMixedCaseQuotedIdentifiers() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean storesUpperCaseIdentifiers() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean storesUpperCaseQuotedIdentifiers() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsANSI92EntryLevelSQL() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsANSI92FullSQL() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsANSI92IntermediateSQL() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsAlterTableWithAddColumn() throws SQLException { - return true; - } - - public boolean supportsAlterTableWithDropColumn() throws SQLException { - return false; - } - - public boolean supportsBatchUpdates() throws SQLException { - return false; - } - - public boolean supportsCatalogsInDataManipulation() throws SQLException { - return false; - } - - public boolean supportsCatalogsInIndexDefinitions() throws SQLException { - return false; - } - - public boolean supportsCatalogsInPrivilegeDefinitions() throws SQLException { - return false; - } - - public boolean supportsCatalogsInProcedureCalls() throws SQLException { - return false; - } - - public boolean supportsCatalogsInTableDefinitions() throws SQLException { - return false; - } - - public boolean supportsColumnAliasing() throws SQLException { - return true; - } - - public boolean supportsConvert() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsConvert(int fromType, int toType) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsCoreSQLGrammar() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsCorrelatedSubqueries() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsDataDefinitionAndDataManipulationTransactions() - throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsDataManipulationTransactionsOnly() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsDifferentTableCorrelationNames() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsExpressionsInOrderBy() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsExtendedSQLGrammar() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsFullOuterJoins() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsGetGeneratedKeys() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsGroupBy() throws SQLException { - return true; - } - - public boolean 
supportsGroupByBeyondSelect() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsGroupByUnrelated() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsIntegrityEnhancementFacility() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsLikeEscapeClause() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsLimitedOuterJoins() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsMinimumSQLGrammar() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsMixedCaseIdentifiers() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsMixedCaseQuotedIdentifiers() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsMultipleOpenResults() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsMultipleResultSets() throws SQLException { - return false; - } - - public boolean supportsMultipleTransactions() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsNamedParameters() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsNonNullableColumns() throws SQLException { - return false; - } - - public boolean supportsOpenCursorsAcrossCommit() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsOpenCursorsAcrossRollback() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsOpenStatementsAcrossCommit() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsOpenStatementsAcrossRollback() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsOrderByUnrelated() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsOuterJoins() throws SQLException { - return true; - } - - public boolean supportsPositionedDelete() throws SQLException { - return false; - } - - public boolean supportsPositionedUpdate() throws SQLException { - return false; - } - - public boolean supportsResultSetConcurrency(int type, int concurrency) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsResultSetHoldability(int holdability) - throws SQLException { - return false; - } - - public boolean supportsResultSetType(int type) throws SQLException { - return true; - } - - public boolean supportsSavepoints() throws SQLException { - return false; - } - - public boolean supportsSchemasInDataManipulation() throws SQLException { - return false; - } - - public boolean supportsSchemasInIndexDefinitions() throws SQLException { - return false; - } - - public boolean supportsSchemasInPrivilegeDefinitions() throws SQLException { - return false; - } - - public boolean supportsSchemasInProcedureCalls() throws SQLException { - return false; - } - - public boolean supportsSchemasInTableDefinitions() throws SQLException { - return false; - } - - public boolean supportsSelectForUpdate() throws SQLException { - return false; - } - - public boolean supportsStatementPooling() throws SQLException { - throw new 
SQLException("Method not supported"); - } - - public boolean supportsStoredFunctionsUsingCallSyntax() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsStoredProcedures() throws SQLException { - return false; - } - - public boolean supportsSubqueriesInComparisons() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsSubqueriesInExists() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsSubqueriesInIns() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsSubqueriesInQuantifieds() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsTableCorrelationNames() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsTransactionIsolationLevel(int level) - throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsTransactions() throws SQLException { - return false; - } - - public boolean supportsUnion() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean supportsUnionAll() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean updatesAreDetected(int type) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean usesLocalFilePerTable() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean usesLocalFiles() throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLException("Method not supported"); - } - - public T unwrap(Class iface) throws SQLException { - throw new SQLException("Method not supported"); - } - - public static void main(String[] args) throws SQLException { - HiveDatabaseMetaData meta = new HiveDatabaseMetaData(null); - System.out.println("DriverName: " + meta.getDriverName()); - System.out.println("DriverVersion: " + meta.getDriverVersion()); - } - -} - diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDriver.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDriver.java index 68f1d15..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDriver.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveDriver.java @@ -1,307 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.jdbc; - -import java.io.IOException; -import java.net.URL; -import java.sql.Connection; -import java.sql.Driver; -import java.sql.DriverPropertyInfo; -import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; -import java.util.Properties; -import java.util.jar.Attributes; -import java.util.jar.Manifest; -import java.util.logging.Logger; -import java.util.regex.Pattern; - -/** - * HiveDriver. - * - */ -public class HiveDriver implements Driver { - static { - try { - java.sql.DriverManager.registerDriver(new HiveDriver()); - } catch (SQLException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - - /** - * Is this driver JDBC compliant? - */ - private static final boolean JDBC_COMPLIANT = false; - - /** - * The required prefix for the connection URL. - */ - private static final String URL_PREFIX = "jdbc:hive://"; - - /** - * If host is provided, without a port. - */ - private static final String DEFAULT_PORT = "10000"; - - /** - * Property key for the database name. - */ - private static final String DBNAME_PROPERTY_KEY = "DBNAME"; - - /** - * Property key for the Hive Server host. - */ - private static final String HOST_PROPERTY_KEY = "HOST"; - - /** - * Property key for the Hive Server port. - */ - private static final String PORT_PROPERTY_KEY = "PORT"; - - /** - * - */ - public HiveDriver() { - // TODO Auto-generated constructor stub - SecurityManager security = System.getSecurityManager(); - if (security != null) { - security.checkWrite("foobah"); - } - } - - /** - * Checks whether a given url is in a valid format. - * - * The current uri format is: jdbc:hive://[host[:port]] - * - * jdbc:hive:// - run in embedded mode jdbc:hive://localhost - connect to - * localhost default port (10000) jdbc:hive://localhost:5050 - connect to - * localhost port 5050 - * - * TODO: - write a better regex. - decide on uri format - */ - - public boolean acceptsURL(String url) throws SQLException { - return Pattern.matches(URL_PREFIX + ".*", url); - } - - @Override - public Connection connect(String url, Properties info) throws SQLException { - return acceptsURL(url) ? new HiveConnection(url, info) : null; - } - - /** - * Package scoped access to the Driver's Major Version - * @return The Major version number of the driver. -1 if it cannot be determined from the - * manifest.mf file. - */ - static int getMajorDriverVersion() { - int version = -1; - try { - String fullVersion = HiveDriver.fetchManifestAttribute( - Attributes.Name.IMPLEMENTATION_VERSION); - String[] tokens = fullVersion.split("\\."); //$NON-NLS-1$ - - if(tokens != null && tokens.length > 0 && tokens[0] != null) { - version = Integer.parseInt(tokens[0]); - } - } catch (Exception e) { - // Possible reasons to end up here: - // - Unable to read version from manifest.mf - // - Version string is not in the proper X.x.xxx format - version = -1; - } - return version; - } - - /** - * Package scoped access to the Driver's Minor Version - * @return The Minor version number of the driver. -1 if it cannot be determined from the - * manifest.mf file. 
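[Editor's note] acceptsURL() above is just a prefix check: any string matching the regex "jdbc:hive://.*" is claimed by this driver, which is how the embedded, default-port, and explicit-port forms listed in its comment are all accepted. A tiny sketch of that check with a few sample URLs (the jdbc:hive2:// URL is the HiveServer2 scheme, which this driver does not claim; the class name is illustrative):

import java.util.regex.Pattern;

public class AcceptsUrlSketch {
  private static final String URL_PREFIX = "jdbc:hive://";

  // Mirrors HiveDriver.acceptsURL() shown above.
  static boolean acceptsURL(String url) {
    return Pattern.matches(URL_PREFIX + ".*", url);
  }

  public static void main(String[] args) {
    System.out.println(acceptsURL("jdbc:hive://"));                       // true (embedded mode)
    System.out.println(acceptsURL("jdbc:hive://localhost"));              // true (port defaults to 10000)
    System.out.println(acceptsURL("jdbc:hive://localhost:5050/default")); // true
    System.out.println(acceptsURL("jdbc:hive2://localhost:10000"));       // false
  }
}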
- */ - static int getMinorDriverVersion() { - int version = -1; - try { - String fullVersion = HiveDriver.fetchManifestAttribute( - Attributes.Name.IMPLEMENTATION_VERSION); - String[] tokens = fullVersion.split("\\."); //$NON-NLS-1$ - - if(tokens != null && tokens.length > 1 && tokens[1] != null) { - version = Integer.parseInt(tokens[1]); - } - } catch (Exception e) { - // Possible reasons to end up here: - // - Unable to read version from manifest.mf - // - Version string is not in the proper X.x.xxx format - version = -1; - } - return version; - } - - /** - * Returns the major version of this driver. - */ - public int getMajorVersion() { - return HiveDriver.getMajorDriverVersion(); - } - - /** - * Returns the minor version of this driver. - */ - public int getMinorVersion() { - return HiveDriver.getMinorDriverVersion(); - } - - public Logger getParentLogger() throws SQLFeatureNotSupportedException { - // JDK 1.7 - throw new SQLFeatureNotSupportedException("Method not supported"); - } - - public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException { - if (info == null) { - info = new Properties(); - } - - if ((url != null) && url.startsWith(URL_PREFIX)) { - info = parseURL(url, info); - } - - DriverPropertyInfo hostProp = new DriverPropertyInfo(HOST_PROPERTY_KEY, - info.getProperty(HOST_PROPERTY_KEY, "")); - hostProp.required = false; - hostProp.description = "Hostname of Hive Server"; - - DriverPropertyInfo portProp = new DriverPropertyInfo(PORT_PROPERTY_KEY, - info.getProperty(PORT_PROPERTY_KEY, "")); - portProp.required = false; - portProp.description = "Port number of Hive Server"; - - DriverPropertyInfo dbProp = new DriverPropertyInfo(DBNAME_PROPERTY_KEY, - info.getProperty(DBNAME_PROPERTY_KEY, "default")); - dbProp.required = false; - dbProp.description = "Database name"; - - DriverPropertyInfo[] dpi = new DriverPropertyInfo[3]; - - dpi[0] = hostProp; - dpi[1] = portProp; - dpi[2] = dbProp; - - return dpi; - } - - /** - * Returns whether the driver is JDBC compliant. - */ - - public boolean jdbcCompliant() { - return JDBC_COMPLIANT; - } - - /** - * Takes a url in the form of jdbc:hive://[hostname]:[port]/[db_name] and - * parses it. Everything after jdbc:hive// is optional. - * - * @param url - * @param defaults - * @return - * @throws java.sql.SQLException - */ - private Properties parseURL(String url, Properties defaults) throws SQLException { - Properties urlProps = (defaults != null) ? new Properties(defaults) - : new Properties(); - - if (url == null || !url.startsWith(URL_PREFIX)) { - throw new SQLException("Invalid connection url: " + url); - } - - if (url.length() <= URL_PREFIX.length()) { - return urlProps; - } - - // [hostname]:[port]/[db_name] - String connectionInfo = url.substring(URL_PREFIX.length()); - - // [hostname]:[port] [db_name] - String[] hostPortAndDatabase = connectionInfo.split("/", 2); - - // [hostname]:[port] - if (hostPortAndDatabase[0].length() > 0) { - String[] hostAndPort = hostPortAndDatabase[0].split(":", 2); - urlProps.put(HOST_PROPERTY_KEY, hostAndPort[0]); - if (hostAndPort.length > 1) { - urlProps.put(PORT_PROPERTY_KEY, hostAndPort[1]); - } else { - urlProps.put(PORT_PROPERTY_KEY, DEFAULT_PORT); - } - } - - // [db_name] - if (hostPortAndDatabase.length > 1) { - urlProps.put(DBNAME_PROPERTY_KEY, hostPortAndDatabase[1]); - } - - return urlProps; - } - - /** - * Lazy-load manifest attributes as needed. - */ - private static Attributes manifestAttributes = null; - - /** - * Loads the manifest attributes from the jar. 
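[Editor's note] parseURL() above splits everything after jdbc:hive:// on the first '/' into a host:port part and a database name, filling in port 10000 when none is given; the resulting HOST, PORT, and DBNAME properties are what getPropertyInfo() reports back. A self-contained re-implementation sketch of that parsing (example-host, sales, and the class name are hypothetical):

import java.util.Properties;

public class ParseUrlSketch {
  private static final String URL_PREFIX = "jdbc:hive://";
  private static final String DEFAULT_PORT = "10000";

  // Mirrors the private HiveDriver.parseURL() helper shown above.
  static Properties parse(String url) {
    Properties props = new Properties();
    if (url == null || !url.startsWith(URL_PREFIX)) {
      throw new IllegalArgumentException("Invalid connection url: " + url);
    }
    if (url.length() <= URL_PREFIX.length()) {
      return props;                                      // embedded mode: nothing to parse
    }
    String connectionInfo = url.substring(URL_PREFIX.length());
    String[] hostPortAndDatabase = connectionInfo.split("/", 2);
    if (hostPortAndDatabase[0].length() > 0) {
      String[] hostAndPort = hostPortAndDatabase[0].split(":", 2);
      props.put("HOST", hostAndPort[0]);
      props.put("PORT", hostAndPort.length > 1 ? hostAndPort[1] : DEFAULT_PORT);
    }
    if (hostPortAndDatabase.length > 1) {
      props.put("DBNAME", hostPortAndDatabase[1]);
    }
    return props;
  }

  public static void main(String[] args) {
    System.out.println(parse("jdbc:hive://"));                        // {}  (embedded)
    System.out.println(parse("jdbc:hive://example-host"));            // HOST=example-host, PORT=10000
    System.out.println(parse("jdbc:hive://example-host:5050/sales")); // HOST=example-host, PORT=5050, DBNAME=sales
  }
}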
- * - * @throws java.net.MalformedURLException - * @throws IOException - */ - private static synchronized void loadManifestAttributes() throws IOException { - if (manifestAttributes != null) { - return; - } - Class clazz = HiveDriver.class; - String classContainer = clazz.getProtectionDomain().getCodeSource() - .getLocation().toString(); - URL manifestUrl = new URL("jar:" + classContainer - + "!/META-INF/MANIFEST.MF"); - Manifest manifest = new Manifest(manifestUrl.openStream()); - manifestAttributes = manifest.getMainAttributes(); - } - - /** - * Package scoped to allow manifest fetching from other HiveDriver classes - * Helper to initialize attributes and return one. - * - * @param attributeName - * @return - * @throws SQLException - */ - static String fetchManifestAttribute(Attributes.Name attributeName) - throws SQLException { - try { - loadManifestAttributes(); - } catch (IOException e) { - throw new SQLException("Couldn't load manifest attributes.", e); - } - return manifestAttributes.getValue(attributeName); - } - -} - diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveMetaDataResultSet.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveMetaDataResultSet.java index 9f7ae42..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveMetaDataResultSet.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveMetaDataResultSet.java @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.jdbc; - -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - -public abstract class HiveMetaDataResultSet extends HiveBaseResultSet { - protected final List data; - - @SuppressWarnings("unchecked") - public HiveMetaDataResultSet(final List columnNames - , final List columnTypes - , final List data) throws SQLException { - if (data!=null) { - this.data = new ArrayList(data); - } else { - this.data = new ArrayList(); - } - if (columnNames!=null) { - this.columnNames = new ArrayList(columnNames); - } else { - this.columnNames = new ArrayList(); - } - if (columnTypes!=null) { - this.columnTypes = new ArrayList(columnTypes); - } else { - this.columnTypes = new ArrayList(); - } - } - - @Override - public void close() throws SQLException { - } - -} diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HivePreparedStatement.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HivePreparedStatement.java index 827104a..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HivePreparedStatement.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HivePreparedStatement.java @@ -1,1300 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
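[Editor's note] The driver reports its name and version by reading Implementation-Title and Implementation-Version from the MANIFEST.MF of the jar it was loaded from, as the loadManifestAttributes()/fetchManifestAttribute() pair above shows. A standalone sketch of the same lookup; it only works for a class actually loaded from a jar, and the class name supplied on the command line is a placeholder (the sketch's own class name is illustrative):

import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.util.jar.Attributes;
import java.util.jar.Manifest;

public class ManifestAttributesSketch {
  // Mirrors the removed helper: build a "jar:" URL to the containing jar's
  // MANIFEST.MF and read its main attributes.
  static Attributes mainAttributes(Class<?> clazz) throws IOException {
    String classContainer =
        clazz.getProtectionDomain().getCodeSource().getLocation().toString();
    URL manifestUrl = new URL("jar:" + classContainer + "!/META-INF/MANIFEST.MF");
    try (InputStream in = manifestUrl.openStream()) {
      return new Manifest(in).getMainAttributes();
    }
  }

  public static void main(String[] args) throws Exception {
    Attributes attrs = mainAttributes(Class.forName(args[0]));
    System.out.println(attrs.getValue(Attributes.Name.IMPLEMENTATION_TITLE));
    System.out.println(attrs.getValue(Attributes.Name.IMPLEMENTATION_VERSION));
  }
}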
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.jdbc; - -import java.io.InputStream; -import java.io.Reader; -import java.math.BigDecimal; -import java.net.URL; -import java.sql.Array; -import java.sql.Blob; -import java.sql.Clob; -import java.sql.Connection; -import java.sql.Date; -import java.sql.NClob; -import java.sql.ParameterMetaData; -import java.sql.PreparedStatement; -import java.sql.Ref; -import java.sql.ResultSet; -import java.sql.ResultSetMetaData; -import java.sql.RowId; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Time; -import java.sql.Timestamp; -import java.text.MessageFormat; -import java.util.Calendar; -import java.util.HashMap; - -import org.apache.hadoop.hive.service.HiveInterface; -import org.apache.hadoop.hive.service.HiveServerException; - -/** - * HivePreparedStatement. - * - */ -public class HivePreparedStatement implements PreparedStatement { - private final String sql; - private HiveInterface client; - /** - * save the SQL parameters {paramLoc:paramValue} - */ - private final HashMap parameters=new HashMap(); - - /** - * We need to keep a reference to the result set to support the following: - * - * statement.execute(String sql); - * statement.getResultSet(); - * . - */ - private ResultSet resultSet = null; - /** - * The maximum number of rows this statement should return (0 => all rows). - */ - private int maxRows = 0; - - /** - * Add SQLWarnings to the warningChain if needed. - */ - private SQLWarning warningChain = null; - - /** - * Keep state so we can fail certain calls made after close(). - */ - private boolean isClosed = false; - - /** - * keep the current ResultRet update count - */ - private final int updateCount=0; - - /** - * - */ - public HivePreparedStatement(HiveInterface client, - String sql) { - this.client = client; - this.sql = sql; - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#addBatch() - */ - - public void addBatch() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#clearParameters() - */ - - public void clearParameters() throws SQLException { - this.parameters.clear(); - } - - /** - * Invokes executeQuery(sql) using the sql provided to the constructor. - * - * @return boolean Returns true if a resultSet is created, false if not. - * Note: If the result set is empty a true is returned. - * - * @throws SQLException - */ - - public boolean execute() throws SQLException { - ResultSet rs = executeImmediate(sql); - return rs != null; - } - - /** - * Invokes executeQuery(sql) using the sql provided to the constructor. 
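[Editor's note] HivePreparedStatement keeps its bind values as strings in a map keyed by 1-based parameter position, and execute(), executeQuery(), and executeUpdate() all funnel into executeImmediate(sql); because updateCount is a final 0, executeUpdate() can only ever report zero affected rows. A compile-time sketch of how the removed class was driven, assuming a HiveInterface client obtained elsewhere and a hypothetical src table (class and method names are illustrative):

import java.sql.ResultSet;

import org.apache.hadoop.hive.jdbc.HivePreparedStatement;
import org.apache.hadoop.hive.service.HiveInterface;

public class LegacyPreparedStatementSketch {
  static void runOnce(HiveInterface client) throws Exception {
    try (HivePreparedStatement ps =
             new HivePreparedStatement(client, "SELECT value FROM src WHERE key = ?")) {
      ps.setInt(1, 42);                       // stored as the string "42" at position 1
      try (ResultSet rs = ps.executeQuery()) { // substitutes parameters, then client.execute(sql)
        while (rs.next()) {
          System.out.println(rs.getString(1));
        }
      }
    }
  }
}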
- * - * @return ResultSet - * @throws SQLException - */ - - public ResultSet executeQuery() throws SQLException { - return executeImmediate(sql); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#executeUpdate() - */ - - public int executeUpdate() throws SQLException { - executeImmediate(sql); - return updateCount; - } - - /** - * Executes the SQL statement. - * - * @param sql The sql, as a string, to execute - * @return ResultSet - * @throws SQLException if the prepared statement is closed or there is a database error. - * caught Exceptions are thrown as SQLExceptions with the description - * "08S01". - */ - - protected ResultSet executeImmediate(String sql) throws SQLException { - if (isClosed) { - throw new SQLException("Can't execute after statement has been closed"); - } - - try { - clearWarnings(); - resultSet = null; - if (sql.contains("?")) { - sql = updateSql(sql, parameters); - } - client.execute(sql); - } catch (HiveServerException e) { - throw new SQLException(e.getMessage(), e.getSQLState(), e.getErrorCode(), e); - } catch (Exception ex) { - throw new SQLException(ex.toString(), "08S01", ex); - } - resultSet = new HiveQueryResultSet(client, maxRows); - return resultSet; - } - - /** - * update the SQL string with parameters set by setXXX methods of {@link PreparedStatement} - * - * @param sql - * @param parameters - * @return updated SQL string - */ - private String updateSql(final String sql, HashMap parameters) { - - StringBuffer newSql = new StringBuffer(sql); - - int paramLoc = 1; - while (getCharIndexFromSqlByParamLocation(sql, '?', paramLoc) > 0) { - // check the user has set the needs parameters - if (parameters.containsKey(paramLoc)) { - int tt = getCharIndexFromSqlByParamLocation(newSql.toString(), '?', 1); - newSql.deleteCharAt(tt); - newSql.insert(tt, parameters.get(paramLoc)); - } - paramLoc++; - } - - return newSql.toString(); - - } - - /** - * Get the index of given char from the SQL string by parameter location - *
The -1 will be return, if nothing found - * - * @param sql - * @param cchar - * @param paramLoc - * @return - */ - private int getCharIndexFromSqlByParamLocation(final String sql, final char cchar, final int paramLoc) { - int signalCount = 0; - int charIndex = -1; - int num = 0; - for (int i = 0; i < sql.length(); i++) { - char c = sql.charAt(i); - if (c == '\'' || c == '\\')// record the count of char "'" and char "\" - { - signalCount++; - } else if (c == cchar && signalCount % 2 == 0) {// check if the ? is really the parameter - num++; - if (num == paramLoc) { - charIndex = i; - break; - } - } - } - return charIndex; - } - - - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#getMetaData() - */ - - public ResultSetMetaData getMetaData() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#getParameterMetaData() - */ - - public ParameterMetaData getParameterMetaData() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setArray(int, java.sql.Array) - */ - - public void setArray(int i, Array x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setAsciiStream(int, java.io.InputStream) - */ - - public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setAsciiStream(int, java.io.InputStream, - * int) - */ - - public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setAsciiStream(int, java.io.InputStream, - * long) - */ - - public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBigDecimal(int, java.math.BigDecimal) - */ - - public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBinaryStream(int, java.io.InputStream) - */ - - public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBinaryStream(int, java.io.InputStream, - * int) - */ - - public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBinaryStream(int, java.io.InputStream, - * long) - */ - - public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see 
java.sql.PreparedStatement#setBlob(int, java.sql.Blob) - */ - - public void setBlob(int i, Blob x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBlob(int, java.io.InputStream) - */ - - public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBlob(int, java.io.InputStream, long) - */ - - public void setBlob(int parameterIndex, InputStream inputStream, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBoolean(int, boolean) - */ - - public void setBoolean(int parameterIndex, boolean x) throws SQLException { - this.parameters.put(parameterIndex, ""+x); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setByte(int, byte) - */ - - public void setByte(int parameterIndex, byte x) throws SQLException { - this.parameters.put(parameterIndex, ""+x); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setBytes(int, byte[]) - */ - - public void setBytes(int parameterIndex, byte[] x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setCharacterStream(int, java.io.Reader) - */ - - public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setCharacterStream(int, java.io.Reader, - * int) - */ - - public void setCharacterStream(int parameterIndex, Reader reader, int length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setCharacterStream(int, java.io.Reader, - * long) - */ - - public void setCharacterStream(int parameterIndex, Reader reader, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setClob(int, java.sql.Clob) - */ - - public void setClob(int i, Clob x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setClob(int, java.io.Reader) - */ - - public void setClob(int parameterIndex, Reader reader) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setClob(int, java.io.Reader, long) - */ - - public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setDate(int, java.sql.Date) - */ - - public void setDate(int parameterIndex, Date x) throws SQLException { - this.parameters.put(parameterIndex, x.toString()); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setDate(int, 
java.sql.Date, - * java.util.Calendar) - */ - - public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setDouble(int, double) - */ - - public void setDouble(int parameterIndex, double x) throws SQLException { - this.parameters.put(parameterIndex,""+x); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setFloat(int, float) - */ - - public void setFloat(int parameterIndex, float x) throws SQLException { - this.parameters.put(parameterIndex,""+x); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setInt(int, int) - */ - - public void setInt(int parameterIndex, int x) throws SQLException { - this.parameters.put(parameterIndex,""+x); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setLong(int, long) - */ - - public void setLong(int parameterIndex, long x) throws SQLException { - this.parameters.put(parameterIndex,""+x); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNCharacterStream(int, java.io.Reader) - */ - - public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNCharacterStream(int, java.io.Reader, - * long) - */ - - public void setNCharacterStream(int parameterIndex, Reader value, long length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNClob(int, java.sql.NClob) - */ - - public void setNClob(int parameterIndex, NClob value) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNClob(int, java.io.Reader) - */ - - public void setNClob(int parameterIndex, Reader reader) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNClob(int, java.io.Reader, long) - */ - - public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNString(int, java.lang.String) - */ - - public void setNString(int parameterIndex, String value) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNull(int, int) - */ - - public void setNull(int parameterIndex, int sqlType) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setNull(int, int, java.lang.String) - */ - - public void setNull(int paramIndex, int sqlType, String typeName) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setObject(int, java.lang.Object) - */ - - public void setObject(int parameterIndex, Object x) throws SQLException { - if (x instanceof 
String) { - setString(parameterIndex, (String) x); - } else if (x instanceof Short) { - setShort(parameterIndex, ((Short) x).shortValue()); - } else if (x instanceof Integer) { - setInt(parameterIndex, ((Integer) x).intValue()); - } else if (x instanceof Long) { - setLong(parameterIndex, ((Long) x).longValue()); - } else if (x instanceof Float) { - setFloat(parameterIndex, ((Float) x).floatValue()); - } else if (x instanceof Double) { - setDouble(parameterIndex, ((Double) x).doubleValue()); - } else if (x instanceof Boolean) { - setBoolean(parameterIndex, ((Boolean) x).booleanValue()); - } else if (x instanceof Byte) { - setByte(parameterIndex, ((Byte) x).byteValue()); - } else if (x instanceof Character) { - setString(parameterIndex, ((Character) x).toString()); - } else { - // Can't infer a type. - throw new SQLException( - MessageFormat - .format( - "Can''t infer the SQL type to use for an instance of {0}. Use setObject() with an explicit Types value to specify the type to use.", - x.getClass().getName())); - } - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setObject(int, java.lang.Object, int) - */ - - public void setObject(int parameterIndex, Object x, int targetSqlType) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setObject(int, java.lang.Object, int, int) - */ - - public void setObject(int parameterIndex, Object x, int targetSqlType, int scale) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setRef(int, java.sql.Ref) - */ - - public void setRef(int i, Ref x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setRowId(int, java.sql.RowId) - */ - - public void setRowId(int parameterIndex, RowId x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setSQLXML(int, java.sql.SQLXML) - */ - - public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setShort(int, short) - */ - - public void setShort(int parameterIndex, short x) throws SQLException { - this.parameters.put(parameterIndex,""+x); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setString(int, java.lang.String) - */ - - public void setString(int parameterIndex, String x) throws SQLException { - x=x.replace("'", "\\'"); - this.parameters.put(parameterIndex,"'"+x+"'"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setTime(int, java.sql.Time) - */ - - public void setTime(int parameterIndex, Time x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setTime(int, java.sql.Time, - * java.util.Calendar) - */ - - public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see 
java.sql.PreparedStatement#setTimestamp(int, java.sql.Timestamp) - */ - - public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { - this.parameters.put(parameterIndex, x.toString()); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setTimestamp(int, java.sql.Timestamp, - * java.util.Calendar) - */ - - public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setURL(int, java.net.URL) - */ - - public void setURL(int parameterIndex, URL x) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.PreparedStatement#setUnicodeStream(int, java.io.InputStream, - * int) - */ - - public void setUnicodeStream(int parameterIndex, InputStream x, int length) - throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#addBatch(java.lang.String) - */ - - public void addBatch(String sql) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#cancel() - */ - - public void cancel() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#clearBatch() - */ - - public void clearBatch() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#clearWarnings() - */ - - public void clearWarnings() throws SQLException { - warningChain=null; - } - - - public void closeOnCompletion() throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - /** - * Closes the prepared statement. 
- * - * @throws SQLException - */ - - public void close() throws SQLException { - client = null; - if (resultSet!=null) { - resultSet.close(); - resultSet = null; - } - isClosed = true; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String) - */ - - public boolean execute(String sql) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String, int) - */ - - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String, int[]) - */ - - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String, java.lang.String[]) - */ - - public boolean execute(String sql, String[] columnNames) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeBatch() - */ - - public int[] executeBatch() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeQuery(java.lang.String) - */ - - public ResultSet executeQuery(String sql) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String) - */ - - public int executeUpdate(String sql) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String, int) - */ - - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String, int[]) - */ - - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String, java.lang.String[]) - */ - - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getConnection() - */ - - public Connection getConnection() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getFetchDirection() - */ - - public int getFetchDirection() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getFetchSize() - */ - - public int getFetchSize() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see 
java.sql.Statement#getGeneratedKeys() - */ - - public ResultSet getGeneratedKeys() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMaxFieldSize() - */ - - public int getMaxFieldSize() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMaxRows() - */ - - public int getMaxRows() throws SQLException { - return this.maxRows; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMoreResults() - */ - - public boolean getMoreResults() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMoreResults(int) - */ - - public boolean getMoreResults(int current) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getQueryTimeout() - */ - - public int getQueryTimeout() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSet() - */ - - public ResultSet getResultSet() throws SQLException { - return this.resultSet; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSetConcurrency() - */ - - public int getResultSetConcurrency() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSetHoldability() - */ - - public int getResultSetHoldability() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSetType() - */ - - public int getResultSetType() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getUpdateCount() - */ - - public int getUpdateCount() throws SQLException { - return updateCount; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getWarnings() - */ - - public SQLWarning getWarnings() throws SQLException { - return warningChain; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#isClosed() - */ - - public boolean isClosed() throws SQLException { - return isClosed; - } - - public boolean isCloseOnCompletion() throws SQLException { - //JDK 1.7 - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#isPoolable() - */ - - public boolean isPoolable() throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setCursorName(java.lang.String) - */ - - public void setCursorName(String name) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setEscapeProcessing(boolean) - */ - - public void setEscapeProcessing(boolean enable) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setFetchDirection(int) - */ 
- - public void setFetchDirection(int direction) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setFetchSize(int) - */ - - public void setFetchSize(int rows) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setMaxFieldSize(int) - */ - - public void setMaxFieldSize(int max) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setMaxRows(int) - */ - - public void setMaxRows(int max) throws SQLException { - if (max < 0) { - throw new SQLException("max must be >= 0"); - } - this.maxRows = max; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setPoolable(boolean) - */ - - public void setPoolable(boolean poolable) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setQueryTimeout(int) - */ - - public void setQueryTimeout(int seconds) throws SQLException { - // TODO Auto-generated method stub - // throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Wrapper#isWrapperFor(java.lang.Class) - */ - - public boolean isWrapperFor(Class iface) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Wrapper#unwrap(java.lang.Class) - */ - - public T unwrap(Class iface) throws SQLException { - // TODO Auto-generated method stub - throw new SQLException("Method not supported"); - } - -} diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveQueryResultSet.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveQueryResultSet.java index e4c2591..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveQueryResultSet.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveQueryResultSet.java @@ -1,223 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.jdbc; - -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Iterator; -import java.util.List; -import java.util.Properties; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Schema; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.SerDe; -import org.apache.hadoop.hive.serde2.SerDeUtils; -import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption; -import org.apache.hadoop.hive.serde2.objectinspector.StructField; -import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -import org.apache.hadoop.hive.service.HiveInterface; -import org.apache.hadoop.hive.service.HiveServerException; -import org.apache.hadoop.io.BytesWritable; - -/** - * HiveQueryResultSet. - * - */ -public class HiveQueryResultSet extends HiveBaseResultSet { - - public static final Log LOG = LogFactory.getLog(HiveQueryResultSet.class); - - private HiveInterface client; - private SerDe serde; - - private int maxRows = 0; - private int rowsFetched = 0; - private int fetchSize = 50; - - private List fetchedRows; - private Iterator fetchedRowsItr; - - public HiveQueryResultSet(HiveInterface client, int maxRows) throws SQLException { - this.client = client; - this.maxRows = maxRows; - initSerde(); - row = Arrays.asList(new Object[columnNames.size()]); - } - - public HiveQueryResultSet(HiveInterface client) throws SQLException { - this(client, 0); - } - - /** - * Instantiate the serde used to deserialize the result rows. - */ - private void initSerde() throws SQLException { - try { - Schema fullSchema = client.getSchema(); - List schema = fullSchema.getFieldSchemas(); - columnNames = new ArrayList(); - columnTypes = new ArrayList(); - StringBuilder namesSb = new StringBuilder(); - StringBuilder typesSb = new StringBuilder(); - - if ((schema != null) && (!schema.isEmpty())) { - for (int pos = 0; pos < schema.size(); pos++) { - if (pos != 0) { - namesSb.append(","); - typesSb.append(","); - } - columnNames.add(schema.get(pos).getName()); - columnTypes.add(schema.get(pos).getType()); - namesSb.append(schema.get(pos).getName()); - typesSb.append(schema.get(pos).getType()); - } - } - String names = namesSb.toString(); - String types = typesSb.toString(); - - serde = new LazySimpleSerDe(); - Properties props = new Properties(); - if (names.length() > 0) { - LOG.debug("Column names: " + names); - props.setProperty(serdeConstants.LIST_COLUMNS, names); - } - if (types.length() > 0) { - LOG.debug("Column types: " + types); - props.setProperty(serdeConstants.LIST_COLUMN_TYPES, types); - } - SerDeUtils.initializeSerDe(serde, new Configuration(), props, null); - - } catch (Exception ex) { - ex.printStackTrace(); - throw new SQLException("Could not create ResultSet: " + ex.getMessage(), ex); - } - } - - @Override - public void close() throws SQLException { - client = null; - } - - /** - * Moves the cursor down one row from its current position. - * - * @see java.sql.ResultSet#next() - * @throws SQLException - * if a database access error occurs. 
- */ - public boolean next() throws SQLException { - if (maxRows > 0 && rowsFetched >= maxRows) { - return false; - } - - try { - if (fetchedRows == null || !fetchedRowsItr.hasNext()) { - fetchedRows = client.fetchN(fetchSize); - fetchedRowsItr = fetchedRows.iterator(); - } - - String rowStr = ""; - if (fetchedRowsItr.hasNext()) { - rowStr = fetchedRowsItr.next(); - } else { - return false; - } - - rowsFetched++; - if (LOG.isDebugEnabled()) { - LOG.debug("Fetched row string: " + rowStr); - } - - StructObjectInspector soi = (StructObjectInspector) serde.getObjectInspector(); - List fieldRefs = soi.getAllStructFieldRefs(); - Object data = serde.deserialize(new BytesWritable(rowStr.getBytes())); - - assert row.size() == fieldRefs.size() : row.size() + ", " + fieldRefs.size(); - for (int i = 0; i < fieldRefs.size(); i++) { - StructField fieldRef = fieldRefs.get(i); - ObjectInspector oi = fieldRef.getFieldObjectInspector(); - Object obj = soi.getStructFieldData(data, fieldRef); - row.set(i, convertLazyToJava(obj, oi)); - } - - if (LOG.isDebugEnabled()) { - LOG.debug("Deserialized row: " + row); - } - } catch (HiveServerException e) { - if (e.getErrorCode() == 0) { // error code == 0 means reached the EOF - return false; - } else { - throw new SQLException("Error retrieving next row", e); - } - } catch (Exception ex) { - ex.printStackTrace(); - throw new SQLException("Error retrieving next row", ex); - } - // NOTE: fetchOne dosn't throw new SQLException("Method not supported"). - return true; - } - - @Override - public void setFetchSize(int rows) throws SQLException { - fetchSize = rows; - } - - @Override - public int getFetchSize() throws SQLException { - return fetchSize; - } - - public T getObject(String columnLabel, Class type) throws SQLException { - //JDK 1.7 - throw new SQLException("Method not supported"); - } - - public T getObject(int columnIndex, Class type) throws SQLException { - //JDK 1.7 - throw new SQLException("Method not supported"); - } - - /** - * Convert a LazyObject to a standard Java object in compliance with JDBC 3.0 (see JDBC 3.0 - * Specification, Table B-3: Mapping from JDBC Types to Java Object Types). - * - * This method is kept consistent with {@link HiveResultSetMetaData#hiveTypeToSqlType}. - */ - private static Object convertLazyToJava(Object o, ObjectInspector oi) { - Object obj = ObjectInspectorUtils.copyToStandardObject(o, oi, ObjectInspectorCopyOption.JAVA); - - // for now, expose non-primitive as a string - // TODO: expose non-primitive as a structured object while maintaining JDBC compliance - if (obj != null && oi.getCategory() != ObjectInspector.Category.PRIMITIVE) { - obj = obj.toString(); - } - - return obj; - } - -} - diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveResultSetMetaData.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveResultSetMetaData.java index 80c855d..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveResultSetMetaData.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveResultSetMetaData.java @@ -1,197 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.jdbc; - -import java.sql.ResultSetMetaData; -import java.sql.SQLException; -import java.util.List; - -import org.apache.hadoop.hive.serde.serdeConstants; - -/** - * HiveResultSetMetaData. - * - */ -public class HiveResultSetMetaData implements java.sql.ResultSetMetaData { - private final List columnNames; - private final List columnTypes; - - public HiveResultSetMetaData(List columnNames, - List columnTypes) { - this.columnNames = columnNames; - this.columnTypes = columnTypes; - } - - public String getCatalogName(int column) throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getColumnClassName(int column) throws SQLException { - throw new SQLException("Method not supported"); - } - - public int getColumnCount() throws SQLException { - return columnNames.size(); - } - - public int getColumnDisplaySize(int column) throws SQLException { - int columnType = getColumnType(column); - - return JdbcColumn.columnDisplaySize(columnType); - } - - public String getColumnLabel(int column) throws SQLException { - return columnNames.get(column - 1); - } - - public String getColumnName(int column) throws SQLException { - return columnNames.get(column - 1); - } - - public int getColumnType(int column) throws SQLException { - if (columnTypes == null) { - throw new SQLException( - "Could not determine column type name for ResultSet"); - } - - if (column < 1 || column > columnTypes.size()) { - throw new SQLException("Invalid column value: " + column); - } - - // we need to convert the thrift type to the SQL type - String type = columnTypes.get(column - 1); - - // we need to convert the thrift type to the SQL type - return Utils.hiveTypeToSqlType(type); - } - - public String getColumnTypeName(int column) throws SQLException { - if (columnTypes == null) { - throw new SQLException( - "Could not determine column type name for ResultSet"); - } - - if (column < 1 || column > columnTypes.size()) { - throw new SQLException("Invalid column value: " + column); - } - - // we need to convert the Hive type to the SQL type name - // TODO: this would be better handled in an enum - String type = columnTypes.get(column - 1); - if ("string".equalsIgnoreCase(type)) { - return serdeConstants.STRING_TYPE_NAME; - } else if ("float".equalsIgnoreCase(type)) { - return serdeConstants.FLOAT_TYPE_NAME; - } else if ("double".equalsIgnoreCase(type)) { - return serdeConstants.DOUBLE_TYPE_NAME; - } else if ("boolean".equalsIgnoreCase(type)) { - return serdeConstants.BOOLEAN_TYPE_NAME; - } else if ("tinyint".equalsIgnoreCase(type)) { - return serdeConstants.TINYINT_TYPE_NAME; - } else if ("smallint".equalsIgnoreCase(type)) { - return serdeConstants.SMALLINT_TYPE_NAME; - } else if ("int".equalsIgnoreCase(type)) { - return serdeConstants.INT_TYPE_NAME; - } else if ("bigint".equalsIgnoreCase(type)) { - return serdeConstants.BIGINT_TYPE_NAME; - } else if ("date".equalsIgnoreCase(type)) { - return serdeConstants.DATE_TYPE_NAME; - } else if ("timestamp".equalsIgnoreCase(type)) { - return serdeConstants.TIMESTAMP_TYPE_NAME; - } else if 
(type.startsWith("decimal")) { - return serdeConstants.DECIMAL_TYPE_NAME; - } else if (type.startsWith("map<")) { - return serdeConstants.STRING_TYPE_NAME; - } else if (type.startsWith("array<")) { - return serdeConstants.STRING_TYPE_NAME; - } else if (type.startsWith("struct<")) { - return serdeConstants.STRING_TYPE_NAME; - } - - throw new SQLException("Unrecognized column type: " + type); - } - - public int getPrecision(int column) throws SQLException { - int columnType = getColumnType(column); - - return JdbcColumn.columnPrecision(columnType); - } - - public int getScale(int column) throws SQLException { - int columnType = getColumnType(column); - - return JdbcColumn.columnScale(columnType); - } - - public String getSchemaName(int column) throws SQLException { - throw new SQLException("Method not supported"); - } - - public String getTableName(int column) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isAutoIncrement(int column) throws SQLException { - // Hive doesn't have an auto-increment concept - return false; - } - - public boolean isCaseSensitive(int column) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isCurrency(int column) throws SQLException { - // Hive doesn't support a currency type - return false; - } - - public boolean isDefinitelyWritable(int column) throws SQLException { - throw new SQLException("Method not supported"); - } - - public int isNullable(int column) throws SQLException { - // Hive doesn't have the concept of not-null - return ResultSetMetaData.columnNullable; - } - - public boolean isReadOnly(int column) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isSearchable(int column) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isSigned(int column) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isWritable(int column) throws SQLException { - throw new SQLException("Method not supported"); - } - - public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLException("Method not supported"); - } - - public T unwrap(Class iface) throws SQLException { - throw new SQLException("Method not supported"); - } - -} diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveStatement.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveStatement.java index 23cb4cd..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveStatement.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/HiveStatement.java @@ -1,526 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.jdbc; - -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLWarning; - -import org.apache.hadoop.hive.service.HiveInterface; -import org.apache.hadoop.hive.service.HiveServerException; - -/** - * HiveStatement. - * - */ -public class HiveStatement implements java.sql.Statement { - private HiveInterface client; - private int fetchSize = 50; - - /** - * We need to keep a reference to the result set to support the following: - * - * statement.execute(String sql); - * statement.getResultSet(); - * . - */ - private ResultSet resultSet = null; - - /** - * The maximum number of rows this statement should return (0 => all rows). - */ - private int maxRows = 0; - - /** - * Add SQLWarnings to the warningChain if needed. - */ - private SQLWarning warningChain = null; - - /** - * Keep state so we can fail certain calls made after close(). - */ - private boolean isClosed = false; - - /** - * - */ - public HiveStatement(HiveInterface client) { - this.client = client; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#addBatch(java.lang.String) - */ - - public void addBatch(String sql) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#cancel() - */ - - public void cancel() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#clearBatch() - */ - - public void clearBatch() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#clearWarnings() - */ - - public void clearWarnings() throws SQLException { - warningChain = null; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#close() - */ - - public void close() throws SQLException { - client = null; - resultSet = null; - isClosed = true; - } - - public void closeOnCompletion() throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String) - */ - - public boolean execute(String sql) throws SQLException { - ResultSet rs = executeQuery(sql); - - // TODO: this should really check if there are results, but there's no easy - // way to do that without calling rs.next(); - return rs != null; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String, int) - */ - - public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String, int[]) - */ - - public boolean execute(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#execute(java.lang.String, java.lang.String[]) - */ - - public boolean execute(String sql, String[] columnNames) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeBatch() - */ - - public int[] executeBatch() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeQuery(java.lang.String) - */ - - public ResultSet executeQuery(String sql) throws SQLException { - if (isClosed) { - throw new SQLException("Can't execute after statement has been 
closed"); - } - - try { - resultSet = null; - client.execute(sql); - } catch (HiveServerException e) { - throw new SQLException(e.getMessage(), e.getSQLState(), e.getErrorCode()); - } catch (Exception ex) { - throw new SQLException(ex.toString(), "08S01"); - } - resultSet = new HiveQueryResultSet(client, maxRows); - resultSet.setFetchSize(fetchSize); - return resultSet; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String) - */ - - public int executeUpdate(String sql) throws SQLException { - try { - client.execute(sql); - } catch (Exception ex) { - throw new SQLException(ex.toString()); - } - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String, int) - */ - - public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String, int[]) - */ - - public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#executeUpdate(java.lang.String, java.lang.String[]) - */ - - public int executeUpdate(String sql, String[] columnNames) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getConnection() - */ - - public Connection getConnection() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getFetchDirection() - */ - - public int getFetchDirection() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getFetchSize() - */ - - public int getFetchSize() throws SQLException { - return fetchSize; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getGeneratedKeys() - */ - - public ResultSet getGeneratedKeys() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMaxFieldSize() - */ - - public int getMaxFieldSize() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMaxRows() - */ - - public int getMaxRows() throws SQLException { - return maxRows; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMoreResults() - */ - - public boolean getMoreResults() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getMoreResults(int) - */ - - public boolean getMoreResults(int current) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getQueryTimeout() - */ - - public int getQueryTimeout() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSet() - */ - - public ResultSet getResultSet() throws SQLException { - return resultSet; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSetConcurrency() - */ - - public int getResultSetConcurrency() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSetHoldability() - */ - - public int 
getResultSetHoldability() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getResultSetType() - */ - - public int getResultSetType() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getUpdateCount() - */ - - public int getUpdateCount() throws SQLException { - return 0; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#getWarnings() - */ - - public SQLWarning getWarnings() throws SQLException { - return warningChain; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#isClosed() - */ - - public boolean isClosed() throws SQLException { - return isClosed; - } - - public boolean isCloseOnCompletion() throws SQLException { - // JDK 1.7 - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#isPoolable() - */ - - public boolean isPoolable() throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setCursorName(java.lang.String) - */ - - public void setCursorName(String name) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setEscapeProcessing(boolean) - */ - - public void setEscapeProcessing(boolean enable) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setFetchDirection(int) - */ - - public void setFetchDirection(int direction) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setFetchSize(int) - */ - - public void setFetchSize(int rows) throws SQLException { - fetchSize = rows; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setMaxFieldSize(int) - */ - - public void setMaxFieldSize(int max) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setMaxRows(int) - */ - - public void setMaxRows(int max) throws SQLException { - if (max < 0) { - throw new SQLException("max must be >= 0"); - } - maxRows = max; - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setPoolable(boolean) - */ - - public void setPoolable(boolean poolable) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Statement#setQueryTimeout(int) - */ - - public void setQueryTimeout(int seconds) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Wrapper#isWrapperFor(java.lang.Class) - */ - - public boolean isWrapperFor(Class iface) throws SQLException { - throw new SQLException("Method not supported"); - } - - /* - * (non-Javadoc) - * - * @see java.sql.Wrapper#unwrap(java.lang.Class) - */ - - public T unwrap(Class iface) throws SQLException { - throw new SQLException("Method not supported"); - } - -} diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/JdbcColumn.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/JdbcColumn.java index d3a87e2..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/JdbcColumn.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/JdbcColumn.java @@ -1,185 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.jdbc; - -import java.sql.SQLException; -import java.sql.Types; - -/** - * Column metadata. - */ -public class JdbcColumn { - private final String columnName; - private final String tableName; - private final String tableCatalog; - private final String type; - private final String comment; - private final int ordinalPos; - - JdbcColumn(String columnName, String tableName, String tableCatalog - , String type, String comment, int ordinalPos) { - this.columnName = columnName; - this.tableName = tableName; - this.tableCatalog = tableCatalog; - this.type = type; - this.comment = comment; - this.ordinalPos = ordinalPos; - } - - public String getColumnName() { - return columnName; - } - - public String getTableName() { - return tableName; - } - - public String getTableCatalog() { - return tableCatalog; - } - - public String getType() { - return type; - } - - public Integer getSqlType() throws SQLException { - return Utils.hiveTypeToSqlType(type); - } - - static int columnDisplaySize(int columnType) throws SQLException { - // according to hiveTypeToSqlType possible options are: - switch(columnType) { - case Types.BOOLEAN: - return columnPrecision(columnType); - case Types.VARCHAR: - return Integer.MAX_VALUE; // hive has no max limit for strings - case Types.TINYINT: - case Types.SMALLINT: - case Types.INTEGER: - case Types.BIGINT: - return columnPrecision(columnType) + 1; // allow +/- - case Types.DATE: - return 10; - case Types.TIMESTAMP: - return columnPrecision(columnType); - // see http://download.oracle.com/javase/6/docs/api/constant-values.html#java.lang.Float.MAX_EXPONENT - case Types.FLOAT: - return 24; // e.g. -(17#).e-### - // see http://download.oracle.com/javase/6/docs/api/constant-values.html#java.lang.Double.MAX_EXPONENT - case Types.DOUBLE: - return 25; // e.g. 
-(17#).e-#### - case Types.DECIMAL: - return Integer.MAX_VALUE; - default: - throw new SQLException("Invalid column type: " + columnType); - } - } - - static int columnPrecision(int columnType) throws SQLException { - // according to hiveTypeToSqlType possible options are: - switch(columnType) { - case Types.BOOLEAN: - return 1; - case Types.VARCHAR: - return Integer.MAX_VALUE; // hive has no max limit for strings - case Types.TINYINT: - return 3; - case Types.SMALLINT: - return 5; - case Types.INTEGER: - return 10; - case Types.BIGINT: - return 19; - case Types.FLOAT: - return 7; - case Types.DOUBLE: - return 15; - case Types.DATE: - return 10; - case Types.TIMESTAMP: - return 29; - case Types.DECIMAL: - return Integer.MAX_VALUE; - default: - throw new SQLException("Invalid column type: " + columnType); - } - } - - static int columnScale(int columnType) throws SQLException { - // according to hiveTypeToSqlType possible options are: - switch(columnType) { - case Types.BOOLEAN: - case Types.VARCHAR: - case Types.TINYINT: - case Types.SMALLINT: - case Types.INTEGER: - case Types.BIGINT: - case Types.DATE: - return 0; - case Types.FLOAT: - return 7; - case Types.DOUBLE: - return 15; - case Types.TIMESTAMP: - return 9; - case Types.DECIMAL: - return Integer.MAX_VALUE; - default: - throw new SQLException("Invalid column type: " + columnType); - } - } - - public Integer getColumnSize() throws SQLException { - int precision = columnPrecision(Utils.hiveTypeToSqlType(type)); - - return precision == 0 ? null : precision; - } - - public Integer getDecimalDigits() throws SQLException { - return columnScale(Utils.hiveTypeToSqlType(type)); - } - - public Integer getNumPrecRadix() { - if (type.equalsIgnoreCase("tinyint")) { - return 10; - } else if (type.equalsIgnoreCase("smallint")) { - return 10; - } else if (type.equalsIgnoreCase("int")) { - return 10; - } else if (type.equalsIgnoreCase("bigint")) { - return 10; - } else if (type.equalsIgnoreCase("decimal")) { - return 10; - } else if (type.equalsIgnoreCase("float")) { - return 2; - } else if (type.equalsIgnoreCase("double")) { - return 2; - } else { // anything else including boolean and string is null - return null; - } - } - - public String getComment() { - return comment; - } - - public int getOrdinalPos() { - return ordinalPos; - } -} diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/JdbcTable.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/JdbcTable.java index 3aeb933..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/JdbcTable.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/JdbcTable.java @@ -1,58 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.jdbc; - -import java.sql.SQLException; - -/** - * Table metadata. 
- */ -public class JdbcTable { - private String tableCatalog; - private String tableName; - private String type; - private String comment; - - public JdbcTable(String tableCatalog, String tableName, String type, String comment) { - this.tableCatalog = tableCatalog; - this.tableName = tableName; - this.type = type; - this.comment = comment; - } - - public String getTableCatalog() { - return tableCatalog; - } - - public String getTableName() { - return tableName; - } - - public String getType() { - return type; - } - - public String getSqlTableType() throws SQLException { - return HiveDatabaseMetaData.toJdbcTableType(type); - } - - public String getComment() { - return comment; - } -} diff --git a/jdbc/src/java/org/apache/hadoop/hive/jdbc/Utils.java b/jdbc/src/java/org/apache/hadoop/hive/jdbc/Utils.java index ebeaa7b..e69de29 100644 --- a/jdbc/src/java/org/apache/hadoop/hive/jdbc/Utils.java +++ b/jdbc/src/java/org/apache/hadoop/hive/jdbc/Utils.java @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.jdbc; - -import java.sql.SQLException; -import java.sql.Types; - -public class Utils { - - /** - * Convert hive types to sql types. 
- * @param type - * @return Integer java.sql.Types values - * @throws SQLException - */ - public static int hiveTypeToSqlType(String type) throws SQLException { - if ("string".equalsIgnoreCase(type)) { - return Types.VARCHAR; - } else if ("float".equalsIgnoreCase(type)) { - return Types.FLOAT; - } else if ("double".equalsIgnoreCase(type)) { - return Types.DOUBLE; - } else if ("boolean".equalsIgnoreCase(type)) { - return Types.BOOLEAN; - } else if ("tinyint".equalsIgnoreCase(type)) { - return Types.TINYINT; - } else if ("smallint".equalsIgnoreCase(type)) { - return Types.SMALLINT; - } else if ("int".equalsIgnoreCase(type)) { - return Types.INTEGER; - } else if ("bigint".equalsIgnoreCase(type)) { - return Types.BIGINT; - } else if ("date".equalsIgnoreCase(type)) { - return Types.DATE; - } else if ("timestamp".equalsIgnoreCase(type)) { - return Types.TIMESTAMP; - } else if (type.startsWith("decimal")) { - return Types.DECIMAL; - } else if (type.startsWith("map<")) { - return Types.VARCHAR; - } else if (type.startsWith("array<")) { - return Types.VARCHAR; - } else if (type.startsWith("struct<")) { - return Types.VARCHAR; - } - throw new SQLException("Unrecognized column type: " + type); - } - -} diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java index f915699..cfac55b 100644 --- a/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java +++ b/jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java @@ -52,6 +52,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hive.jdbc.Utils.JdbcConnectionParams; import org.apache.hive.service.auth.HiveAuthFactory; @@ -143,7 +144,9 @@ public HiveConnection(String uri, Properties info) throws SQLException { isEmbeddedMode = connParams.isEmbeddedMode(); if (isEmbeddedMode) { - client = new EmbeddedThriftBinaryCLIService(); + EmbeddedThriftBinaryCLIService embeddedClient = new EmbeddedThriftBinaryCLIService(); + embeddedClient.init(new HiveConf()); + client = embeddedClient; } else { // extract user/password from JDBC connection properties if its not supplied in the // connection URL diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift index 4625801..2113263 100755 --- a/metastore/if/hive_metastore.thrift +++ b/metastore/if/hive_metastore.thrift @@ -1139,6 +1139,7 @@ const string META_TABLE_PARTITION_COLUMN_TYPES = "partition_columns.types", const string FILE_INPUT_FORMAT = "file.inputformat", const string FILE_OUTPUT_FORMAT = "file.outputformat", const string META_TABLE_STORAGE = "storage_handler", - +const string TABLE_IS_TRANSACTIONAL = "transactional", +const string TABLE_NO_AUTO_COMPACT = "no_auto_compaction", diff --git a/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql b/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql index 6112321..65f5d8c 100644 --- a/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql +++ b/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql @@ -1,27 +1,2 @@ --- --- Create the table if it doesn't exist. 
--- -CREATE TABLE IF NOT EXISTS PART_COL_STATS ( - CS_ID NUMBER NOT NULL, - DB_NAME VARCHAR2(128) NOT NULL, - TABLE_NAME VARCHAR2(128) NOT NULL, - PARTITION_NAME VARCHAR2(767) NOT NULL, - COLUMN_NAME VARCHAR2(128) NOT NULL, - COLUMN_TYPE VARCHAR2(128) NOT NULL, - PART_ID NUMBER NOT NULL, - LONG_LOW_VALUE NUMBER, - LONG_HIGH_VALUE NUMBER, - DOUBLE_LOW_VALUE NUMBER, - DOUBLE_HIGH_VALUE NUMBER, - BIG_DECIMAL_LOW_VALUE VARCHAR2(4000), - BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000), - NUM_NULLS NUMBER NOT NULL, - NUM_DISTINCTS NUMBER, - AVG_COL_LEN NUMBER, - MAX_COL_LEN NUMBER, - NUM_TRUES NUMBER, - NUM_FALSES NUMBER, - LAST_ANALYZED NUMBER NOT NULL -); - CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); + diff --git a/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql b/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql new file mode 100644 index 0000000..4723c3b --- /dev/null +++ b/metastore/scripts/upgrade/oracle/pre-0-upgrade-0.13.0-to-0.14.0.oracle.sql @@ -0,0 +1,23 @@ +-- Oracle has no easy way to do CREATE TABLE IF NOT EXISTS compatible with SchemaTool, so do it here + +CREATE TABLE PART_COL_STATS ( +CS_ID NUMBER NOT NULL, +DB_NAME VARCHAR2(128) NOT NULL, +TABLE_NAME VARCHAR2(128) NOT NULL, +PARTITION_NAME VARCHAR2(767) NOT NULL, +COLUMN_NAME VARCHAR2(128) NOT NULL, +COLUMN_TYPE VARCHAR2(128) NOT NULL, +PART_ID NUMBER NOT NULL, +LONG_LOW_VALUE NUMBER, +LONG_HIGH_VALUE NUMBER, +DOUBLE_LOW_VALUE NUMBER, +DOUBLE_HIGH_VALUE NUMBER, +BIG_DECIMAL_LOW_VALUE VARCHAR2(4000), +BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000), +NUM_NULLS NUMBER NOT NULL, +NUM_DISTINCTS NUMBER, +AVG_COL_LEN NUMBER, +MAX_COL_LEN NUMBER, +NUM_TRUES NUMBER, +NUM_FALSES NUMBER, +LAST_ANALYZED NUMBER NOT NULL); diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp index c1f9da7..c7bf9ba 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp @@ -53,6 +53,10 @@ hive_metastoreConstants::hive_metastoreConstants() { META_TABLE_STORAGE = "storage_handler"; + TABLE_IS_TRANSACTIONAL = "transactional"; + + TABLE_NO_AUTO_COMPACT = "no_auto_compaction"; + } }}} // namespace diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h index d41e2e8..35a8a50 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h @@ -36,6 +36,8 @@ class hive_metastoreConstants { std::string FILE_INPUT_FORMAT; std::string FILE_OUTPUT_FORMAT; std::string META_TABLE_STORAGE; + std::string TABLE_IS_TRANSACTIONAL; + std::string TABLE_NO_AUTO_COMPACT; }; extern const hive_metastoreConstants g_hive_metastore_constants; diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java index 8d17378..0f2fca8 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java @@ -75,4 +75,8 @@ public static final String META_TABLE_STORAGE = "storage_handler"; + public static final String TABLE_IS_TRANSACTIONAL = "transactional"; + + public static final 
String TABLE_NO_AUTO_COMPACT = "no_auto_compaction"; + } diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php index 8c3643f..cc2cdad 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/Types.php +++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -12890,4 +12890,8 @@ $GLOBALS['hive_metastore_CONSTANTS']['FILE_OUTPUT_FORMAT'] = "file.outputformat" $GLOBALS['hive_metastore_CONSTANTS']['META_TABLE_STORAGE'] = "storage_handler"; +$GLOBALS['hive_metastore_CONSTANTS']['TABLE_IS_TRANSACTIONAL'] = "transactional"; + +$GLOBALS['hive_metastore_CONSTANTS']['TABLE_NO_AUTO_COMPACT'] = "no_auto_compaction"; + diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py b/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py index e3e1fde..81f70eb 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py @@ -30,3 +30,5 @@ FILE_INPUT_FORMAT = "file.inputformat" FILE_OUTPUT_FORMAT = "file.outputformat" META_TABLE_STORAGE = "storage_handler" +TABLE_IS_TRANSACTIONAL = "transactional" +TABLE_NO_AUTO_COMPACT = "no_auto_compaction" diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb index 162b716..3208ecd 100644 --- a/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb +++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb @@ -49,3 +49,7 @@ FILE_OUTPUT_FORMAT = %q"file.outputformat" META_TABLE_STORAGE = %q"storage_handler" +TABLE_IS_TRANSACTIONAL = %q"transactional" + +TABLE_NO_AUTO_COMPACT = %q"no_auto_compaction" + diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java b/metastore/src/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java new file mode 100644 index 0000000..b723484 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/DefaultMetaStoreFilterHookImpl.java @@ -0,0 +1,101 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import java.util.List; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionSpec; +import org.apache.hadoop.hive.metastore.api.Table; + +/** + * Default no-op implementation of the MetaStoreFilterHook that returns the result as is + */ +public class DefaultMetaStoreFilterHookImpl implements MetaStoreFilterHook { + + public DefaultMetaStoreFilterHookImpl(HiveConf conf) { + } + + @Override + public List filterDatabases(List dbList) { + return dbList; + } + + @Override + public Database filterDatabase(Database dataBase) throws NoSuchObjectException { + return dataBase; + } + + @Override + public List filterTableNames(String dbName, List tableList) { + return tableList; + } + + @Override + public Table filterTable(Table table) throws NoSuchObjectException { + return table; + } + + @Override + public List
<Table> filterTables(List<Table>
tableList) { + return tableList; + } + + @Override + public List filterPartitions(List partitionList) { + return partitionList; + } + + @Override + public List filterPartitionSpecs( + List partitionSpecList) { + return partitionSpecList; + } + + @Override + public Partition filterPartition(Partition partition) throws NoSuchObjectException { + return partition; + } + + @Override + public List filterPartitionNames(String dbName, String tblName, + List partitionNames) { + return partitionNames; + } + + @Override + public Index filterIndex(Index index) throws NoSuchObjectException { + return index; + } + + @Override + public List filterIndexNames(String dbName, String tblName, + List indexList) { + return indexList; + } + + @Override + public List filterIndexes(List indexeList) { + return indexeList; + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index a47619c..25b61a7 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -43,6 +43,7 @@ import java.util.Set; import java.util.Timer; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -5720,7 +5721,7 @@ public void run() { Lock startLock = new ReentrantLock(); Condition startCondition = startLock.newCondition(); - MetaStoreThread.BooleanPointer startedServing = new MetaStoreThread.BooleanPointer(); + AtomicBoolean startedServing = new AtomicBoolean(); startMetaStoreThreads(conf, startLock, startCondition, startedServing); startMetaStore(cli.port, ShimLoader.getHadoopThriftAuthBridge(), conf, startLock, startCondition, startedServing); @@ -5767,7 +5768,7 @@ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, */ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, HiveConf conf, Lock startLock, Condition startCondition, - MetaStoreThread.BooleanPointer startedServing) throws Throwable { + AtomicBoolean startedServing) throws Throwable { try { isMetaStoreRemote = true; // Server will create new threads up to max as necessary. After an idle @@ -5851,7 +5852,7 @@ public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, private static void signalOtherThreadsToStart(final TServer server, final Lock startLock, final Condition startCondition, - final MetaStoreThread.BooleanPointer startedServing) { + final AtomicBoolean startedServing) { // A simple thread to wait until the server has started and then signal the other threads to // begin Thread t = new Thread() { @@ -5866,7 +5867,7 @@ public void run() { } while (!server.isServing()); startLock.lock(); try { - startedServing.boolVal = true; + startedServing.set(true); startCondition.signalAll(); } finally { startLock.unlock(); @@ -5882,7 +5883,7 @@ public void run() { */ private static void startMetaStoreThreads(final HiveConf conf, final Lock startLock, final Condition startCondition, final - MetaStoreThread.BooleanPointer startedServing) { + AtomicBoolean startedServing) { // A thread is spun up to start these other threads. That's because we can't start them // until after the TServer has started, but once TServer.serve is called we aren't given back // control. 
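A short, hedged illustration of the new filter-hook extension point may help here: the DefaultMetaStoreFilterHookImpl added above (the MetaStoreFilterHook interface itself appears further down) is what authorization plugins — the patch names Apache Sentry — implement to prune metadata on the client side. The class below is only a sketch and is not part of this patch or of Hive; the "secret_" prefix rule is a made-up policy standing in for whatever a real plugin would enforce.

package org.apache.hadoop.hive.metastore;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;

/** Illustrative only: hides every table whose name starts with a hard-coded prefix. */
public class PrefixHidingFilterHook extends DefaultMetaStoreFilterHookImpl {
  private static final String HIDDEN_PREFIX = "secret_"; // hypothetical policy, not from the patch

  public PrefixHidingFilterHook(HiveConf conf) {
    super(conf); // the (HiveConf) constructor is what loadFilterHooks() instantiates reflectively
  }

  @Override
  public List<String> filterTableNames(String dbName, List<String> tableList) {
    List<String> visible = new ArrayList<String>();
    for (String name : tableList) {
      if (!name.startsWith(HIDDEN_PREFIX)) {
        visible.add(name);
      }
    }
    return visible;
  }

  @Override
  public Table filterTable(Table table) throws NoSuchObjectException {
    if (table.getTableName().startsWith(HIDDEN_PREFIX)) {
      throw new NoSuchObjectException(table.getTableName() + " is filtered out");
    }
    return table;
  }
}

Such a class would be selected through HiveConf.ConfVars.METASTORE_FILTER_HOOK, which HiveMetaStoreClient.loadFilterHooks() reads before wrapping the get/list calls, as the HiveMetaStoreClient hunks further down show.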
@@ -5900,7 +5901,7 @@ public void run() { try { // Per the javadocs on Condition, do not depend on the condition alone as a start gate // since spurious wake ups are possible. - while (!startedServing.boolVal) startCondition.await(); + while (!startedServing.get()) startCondition.await(); startCompactorInitiator(conf); startCompactorWorkers(conf); startCompactorCleaner(conf); @@ -5960,7 +5961,7 @@ private static void initializeAndStartThread(MetaStoreThread thread, HiveConf co LOG.info("Starting metastore thread of type " + thread.getClass().getName()); thread.setHiveConf(conf); thread.setThreadId(nextThreadId++); - thread.init(new MetaStoreThread.BooleanPointer(), new MetaStoreThread.BooleanPointer()); + thread.init(new AtomicBoolean(), new AtomicBoolean()); thread.start(); } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 9f2b4fc..b331754 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.hive.metastore.MetaStoreUtils.isIndexTable; import java.io.IOException; +import java.lang.reflect.Constructor; import java.lang.reflect.InvocationHandler; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; @@ -147,6 +148,7 @@ protected final HiveConf conf; private String tokenStrForm; private final boolean localMetaStore; + private final MetaStoreFilterHook filterHook; private Map currentMetaVars; @@ -169,6 +171,7 @@ public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader) conf = new HiveConf(HiveMetaStoreClient.class); } this.conf = conf; + filterHook = loadFilterHooks(); String msUri = conf.getVar(HiveConf.ConfVars.METASTOREURIS); localMetaStore = HiveConfUtil.isEmbeddedMetaStore(msUri); @@ -215,6 +218,31 @@ public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader) open(); } + private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException { + Class authProviderClass = conf. + getClass(HiveConf.ConfVars.METASTORE_FILTER_HOOK.varname, + DefaultMetaStoreFilterHookImpl.class, + MetaStoreFilterHook.class); + String msg = "Unable to create instance of " + authProviderClass.getName() + ": "; + try { + Constructor constructor = + authProviderClass.getConstructor(HiveConf.class); + return constructor.newInstance(conf); + } catch (NoSuchMethodException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (SecurityException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (InstantiationException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (IllegalAccessException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (IllegalArgumentException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (InvocationTargetException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } + } + /** * Swaps the first element of the metastoreUris array with a random element from the * remainder of the array. @@ -498,7 +526,7 @@ public int add_partitions(List new_parts) part.getDbName(), part.getTableName(), parts, ifNotExists); req.setNeedResult(needResults); AddPartitionsResult result = client.add_partitions_req(req); - return needResults ? result.getPartitions() : null; + return needResults ? 
filterHook.filterPartitions(result.getPartitions()) : null; } @Override @@ -667,7 +695,7 @@ public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownD for (String table : tableList) { try { // Subclasses can override this step (for example, for temporary tables) - dropTable(name, table, deleteData, false); + dropTable(name, table, deleteData, true); } catch (UnsupportedOperationException e) { // Ignore Index tables, those will be dropped with parent tables } @@ -904,7 +932,7 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException public List getDatabases(String databasePattern) throws MetaException { try { - return client.get_databases(databasePattern); + return filterHook.filterDatabases(client.get_databases(databasePattern)); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } @@ -915,7 +943,7 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException @Override public List getAllDatabases() throws MetaException { try { - return client.get_all_databases(); + return filterHook.filterDatabases(client.get_all_databases()); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } @@ -934,29 +962,30 @@ public boolean dropType(String type) throws NoSuchObjectException, MetaException @Override public List listPartitions(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, TException { - return deepCopyPartitions( - client.get_partitions(db_name, tbl_name, max_parts)); + return deepCopyPartitions(filterHook.filterPartitions( + client.get_partitions(db_name, tbl_name, max_parts))); } @Override public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException { - return PartitionSpecProxy.Factory.get(client.get_partitions_pspec(dbName, tableName, maxParts)); + return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( + client.get_partitions_pspec(dbName, tableName, maxParts))); } @Override public List listPartitions(String db_name, String tbl_name, List part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException { - return deepCopyPartitions( - client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts)); + return deepCopyPartitions(filterHook.filterPartitions( + client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts))); } @Override public List listPartitionsWithAuthInfo(String db_name, String tbl_name, short max_parts, String user_name, List group_names) throws NoSuchObjectException, MetaException, TException { - return deepCopyPartitions( - client.get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names)); + return deepCopyPartitions(filterHook.filterPartitions( + client.get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names))); } @Override @@ -964,8 +993,8 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in String tbl_name, List part_vals, short max_parts, String user_name, List group_names) throws NoSuchObjectException, MetaException, TException { - return deepCopyPartitions(client.get_partitions_ps_with_auth(db_name, - tbl_name, part_vals, max_parts, user_name, group_names)); + return deepCopyPartitions(filterHook.filterPartitions(client.get_partitions_ps_with_auth(db_name, + tbl_name, part_vals, max_parts, user_name, group_names))); } /** @@ -986,16 +1015,16 @@ public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, in public List 
listPartitionsByFilter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, TException { - return deepCopyPartitions( - client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts)); + return deepCopyPartitions(filterHook.filterPartitions( + client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts))); } @Override public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, String filter, int max_parts) throws MetaException, NoSuchObjectException, TException { - return PartitionSpecProxy.Factory.get( - client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)); + return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( + client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts))); } @Override @@ -1023,6 +1052,7 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr throw new IncompatibleMetastoreException( "Metastore doesn't support listPartitionsByExpr: " + te.getMessage()); } + r.setPartitions(filterHook.filterPartitions(r.getPartitions())); // TODO: in these methods, do we really need to deepcopy? deepCopyPartitions(r.getPartitions(), result); return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst. @@ -1040,7 +1070,7 @@ public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr @Override public Database getDatabase(String name) throws NoSuchObjectException, MetaException, TException { - return deepCopy(client.get_database(name)); + return deepCopy(filterHook.filterDatabase(client.get_database(name))); } /** @@ -1056,13 +1086,15 @@ public Database getDatabase(String name) throws NoSuchObjectException, @Override public Partition getPartition(String db_name, String tbl_name, List part_vals) throws NoSuchObjectException, MetaException, TException { - return deepCopy(client.get_partition(db_name, tbl_name, part_vals)); + return deepCopy(filterHook.filterPartition( + client.get_partition(db_name, tbl_name, part_vals))); } @Override public List getPartitionsByNames(String db_name, String tbl_name, List part_names) throws NoSuchObjectException, MetaException, TException { - return deepCopyPartitions(client.get_partitions_by_names(db_name, tbl_name, part_names)); + return deepCopyPartitions(filterHook.filterPartitions( + client.get_partitions_by_names(db_name, tbl_name, part_names))); } @Override @@ -1070,8 +1102,8 @@ public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, List part_vals, String user_name, List group_names) throws MetaException, UnknownTableException, NoSuchObjectException, TException { - return deepCopy(client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name, - group_names)); + return deepCopy(filterHook.filterPartition(client.get_partition_with_auth(db_name, + tbl_name, part_vals, user_name, group_names))); } /** @@ -1088,7 +1120,7 @@ public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, @Override public Table getTable(String dbname, String name) throws MetaException, TException, NoSuchObjectException { - return deepCopy(client.get_table(dbname, name)); + return deepCopy(filterHook.filterTable(client.get_table(dbname, name))); } /** {@inheritDoc} */ @@ -1096,21 +1128,23 @@ public Table getTable(String dbname, String name) throws MetaException, @Deprecated public Table getTable(String tableName) throws MetaException, TException, NoSuchObjectException { - return getTable(DEFAULT_DATABASE_NAME, 
tableName); + return filterHook.filterTable(getTable(DEFAULT_DATABASE_NAME, tableName)); } /** {@inheritDoc} */ @Override public List
getTableObjectsByName(String dbName, List tableNames) throws MetaException, InvalidOperationException, UnknownDBException, TException { - return deepCopyTables(client.get_table_objects_by_name(dbName, tableNames)); + return deepCopyTables(filterHook.filterTables( + client.get_table_objects_by_name(dbName, tableNames))); } /** {@inheritDoc} */ @Override public List listTableNamesByFilter(String dbName, String filter, short maxTables) throws MetaException, TException, InvalidOperationException, UnknownDBException { - return client.get_table_names_by_filter(dbName, filter, maxTables); + return filterHook.filterTableNames(dbName, + client.get_table_names_by_filter(dbName, filter, maxTables)); } /** @@ -1129,7 +1163,7 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE @Override public List getTables(String dbname, String tablePattern) throws MetaException { try { - return client.get_tables(dbname, tablePattern); + return filterHook.filterTableNames(dbname, client.get_tables(dbname, tablePattern)); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } @@ -1140,7 +1174,7 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE @Override public List getAllTables(String dbname) throws MetaException { try { - return client.get_all_tables(dbname); + return filterHook.filterTableNames(dbname, client.get_all_tables(dbname)); } catch (Exception e) { MetaStoreUtils.logAndThrowMetaException(e); } @@ -1151,11 +1185,10 @@ public Type getType(String name) throws NoSuchObjectException, MetaException, TE public boolean tableExists(String databaseName, String tableName) throws MetaException, TException, UnknownDBException { try { - client.get_table(databaseName, tableName); + return filterHook.filterTable(client.get_table(databaseName, tableName)) == null; } catch (NoSuchObjectException e) { return false; } - return true; } /** {@inheritDoc} */ @@ -1169,14 +1202,16 @@ public boolean tableExists(String tableName) throws MetaException, @Override public List listPartitionNames(String dbName, String tblName, short max) throws MetaException, TException { - return client.get_partition_names(dbName, tblName, max); + return filterHook.filterPartitionNames(dbName, tblName, + client.get_partition_names(dbName, tblName, max)); } @Override public List listPartitionNames(String db_name, String tbl_name, List part_vals, short max_parts) throws MetaException, TException, NoSuchObjectException { - return client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts); + return filterHook.filterPartitionNames(db_name, tbl_name, + client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)); } @Override @@ -1259,7 +1294,7 @@ public void alter_index(String dbname, String base_tbl_name, String idx_name, In public Index getIndex(String dbName, String tblName, String indexName) throws MetaException, UnknownTableException, NoSuchObjectException, TException { - return deepCopy(client.get_index_by_name(dbName, tblName, indexName)); + return deepCopy(filterHook.filterIndex(client.get_index_by_name(dbName, tblName, indexName))); } /** @@ -1275,7 +1310,7 @@ public Index getIndex(String dbName, String tblName, String indexName) @Override public List listIndexNames(String dbName, String tblName, short max) throws MetaException, TException { - return client.get_index_names(dbName, tblName, max); + return filterHook.filterIndexNames(dbName, tblName, client.get_index_names(dbName, tblName, max)); } /** @@ -1291,7 +1326,7 @@ public Index 
getIndex(String dbName, String tblName, String indexName) @Override public List listIndexes(String dbName, String tblName, short max) throws NoSuchObjectException, MetaException, TException { - return client.get_indexes(dbName, tblName, max); + return filterHook.filterIndexes(client.get_indexes(dbName, tblName, max)); } /** {@inheritDoc} */ @@ -1380,7 +1415,7 @@ public String getConfigValue(String name, String defaultValue) @Override public Partition getPartition(String db, String tableName, String partName) throws MetaException, TException, UnknownTableException, NoSuchObjectException { - return deepCopy(client.get_partition_by_name(db, tableName, partName)); + return deepCopy(filterHook.filterPartition(client.get_partition_by_name(db, tableName, partName))); } public Partition appendPartitionByName(String dbName, String tableName, String partName) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index b6c633c..35f6f26 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -32,6 +32,7 @@ import java.util.Map; import java.util.TreeMap; +import javax.jdo.JDODataStoreException; import javax.jdo.PersistenceManager; import javax.jdo.Query; import javax.jdo.Transaction; @@ -40,6 +41,10 @@ import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.derby.iapi.error.StandardException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; @@ -80,64 +85,108 @@ * to SQL stores only. There's always a way to do without direct SQL. */ class MetaStoreDirectSql { - private static final Log LOG = LogFactory.getLog(MetaStoreDirectSql.class); + private static enum DB { + MYSQL, + ORACLE, + MSSQL, + DERBY, + OTHER + } + + private static final int NO_BATCHING = -1, DETECT_BATCHING = 0; + private static final Log LOG = LogFactory.getLog(MetaStoreDirectSql.class); private final PersistenceManager pm; /** - * We want to avoid db-specific code in this class and stick with ANSI SQL. However, mysql - * and postgres are differently ansi-incompatible (mysql by default doesn't support quoted - * identifiers, and postgres contravenes ANSI by coercing unquoted ones to lower case). + * We want to avoid db-specific code in this class and stick with ANSI SQL. However: + * 1) mysql and postgres are differently ansi-incompatible (mysql by default doesn't support + * quoted identifiers, and postgres contravenes ANSI by coercing unquoted ones to lower case). * MySQL's way of working around this is simpler (just set ansi quotes mode on), so we will - * use that. MySQL detection is done by actually issuing the set-ansi-quotes command. + * use that. MySQL detection is done by actually issuing the set-ansi-quotes command; + * + * Use sparingly, we don't want to devolve into another DataNucleus... */ - private final boolean isMySql; + private final DB dbType; + private final int batchSize; /** * Whether direct SQL can be used with the current datastore backing {@link #pm}. 
*/ private final boolean isCompatibleDatastore; - - public MetaStoreDirectSql(PersistenceManager pm) { + + public MetaStoreDirectSql(PersistenceManager pm, Configuration conf) { this.pm = pm; - Transaction tx = pm.currentTransaction(); - tx.begin(); - boolean isMySql = false; + this.dbType = determineDbType(); + int batchSize = HiveConf.getIntVar(conf, ConfVars.METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE); + if (batchSize == DETECT_BATCHING) { + batchSize = (dbType == DB.ORACLE || dbType == DB.MSSQL) ? 1000 : NO_BATCHING; + } + this.batchSize = batchSize; + + this.isCompatibleDatastore = ensureDbInit() && runTestQuery(); + if (isCompatibleDatastore) { + LOG.info("Using direct SQL, underlying DB is " + dbType); + } + } + + private DB determineDbType() { + DB dbType = DB.OTHER; + if (runDbCheck("SET @@session.sql_mode=ANSI_QUOTES", "MySql")) { + dbType = DB.MYSQL; + } else if (runDbCheck("SELECT version FROM v$instance", "Oracle")) { + dbType = DB.ORACLE; + } else if (runDbCheck("SELECT @@version", "MSSQL")) { + dbType = DB.MSSQL; + } else { + // TODO: maybe we should use getProductName to identify all the DBs + String productName = getProductName(); + if (productName != null && productName.toLowerCase().contains("derby")) { + dbType = DB.DERBY; + } + } + return dbType; + } + + private String getProductName() { + JDOConnection jdoConn = pm.getDataStoreConnection(); try { - trySetAnsiQuotesForMysql(); - isMySql = true; - } catch (SQLException sqlEx) { - LOG.info("MySQL check failed, assuming we are not on mysql: " + sqlEx.getMessage()); - tx.rollback(); - tx = pm.currentTransaction(); - tx.begin(); + return ((Connection)jdoConn.getNativeConnection()).getMetaData().getDatabaseProductName(); + } catch (Throwable t) { + LOG.warn("Error retrieving product name", t); + return null; + } finally { + jdoConn.close(); // We must release the connection before we call other pm methods. } + } - boolean isCompatibleDatastore = true; + private boolean ensureDbInit() { + Transaction tx = pm.currentTransaction(); try { // Force the underlying db to initialize. pm.newQuery(MDatabase.class, "name == ''").execute(); pm.newQuery(MTableColumnStatistics.class, "dbName == ''").execute(); pm.newQuery(MPartitionColumnStatistics.class, "dbName == ''").execute(); + return true; } catch (Exception ex) { - isCompatibleDatastore = false; - LOG.error("Database initialization failed; direct SQL is disabled", ex); + LOG.warn("Database initialization failed; direct SQL is disabled", ex); tx.rollback(); + return false; } - if (isCompatibleDatastore) { - // Self-test query. If it doesn't work, we will self-disable. What a PITA... - String selfTestQuery = "select \"DB_ID\" from \"DBS\""; - try { - pm.newQuery("javax.jdo.query.SQL", selfTestQuery).execute(); - tx.commit(); - } catch (Exception ex) { - isCompatibleDatastore = false; - LOG.error("Self-test query [" + selfTestQuery + "] failed; direct SQL is disabled", ex); - tx.rollback(); - } - } + } - this.isCompatibleDatastore = isCompatibleDatastore; - this.isMySql = isMySql; + private boolean runTestQuery() { + Transaction tx = pm.currentTransaction(); + // Run a self-test query. If it doesn't work, we will self-disable. What a PITA... 
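+ // The quoted-identifier syntax in the probe below is what every direct SQL path in this class
+ // relies on; if the backing datastore (or its schema) cannot answer it, isCompatibleDatastore
+ // stays false and the ObjectStore falls back to the JDO/DataNucleus code paths instead.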
+ String selfTestQuery = "select \"DB_ID\" from \"DBS\""; + try { + pm.newQuery("javax.jdo.query.SQL", selfTestQuery).execute(); + tx.commit(); + return true; + } catch (Exception ex) { + LOG.warn("Self-test query [" + selfTestQuery + "] failed; direct SQL is disabled", ex); + tx.rollback(); + return false; + } } public boolean isCompatibleDatastore() { @@ -150,22 +199,16 @@ public boolean isCompatibleDatastore() { * here - for eg., for MySQL, we signal that we want to use ANSI SQL quoting behaviour */ private void doDbSpecificInitializationsBeforeQuery() throws MetaException { - if (!isMySql) return; + if (dbType != DB.MYSQL) return; try { assert pm.currentTransaction().isActive(); // must be inside tx together with queries - trySetAnsiQuotesForMysql(); + executeNoResult("SET @@session.sql_mode=ANSI_QUOTES"); } catch (SQLException sqlEx) { throw new MetaException("Error setting ansi quotes: " + sqlEx.getMessage()); } } - /** - * MySQL, by default, doesn't recognize ANSI quotes which we need to have for Postgres. - * Try to set the ANSI quotes mode on for the session. Due to connection pooling, needs - * to be called in the same transaction as the actual queries. - */ - private void trySetAnsiQuotesForMysql() throws SQLException { - final String queryText = "SET @@session.sql_mode=ANSI_QUOTES"; + private void executeNoResult(final String queryText) throws SQLException { JDOConnection jdoConn = pm.getDataStoreConnection(); boolean doTrace = LOG.isDebugEnabled(); try { @@ -177,6 +220,23 @@ private void trySetAnsiQuotesForMysql() throws SQLException { } } + private boolean runDbCheck(String queryText, String name) { + Transaction tx = pm.currentTransaction(); + if (!tx.isActive()) { + tx.begin(); + } + try { + executeNoResult(queryText); + return true; + } catch (Throwable t) { + LOG.debug(name + " check failed, assuming we are not on " + name + ": " + t.getMessage()); + tx.rollback(); + tx = pm.currentTransaction(); + tx.begin(); + return false; + } + } + public Database getDatabase(String dbName) throws MetaException{ Query queryDbSelector = null; Query queryDbParams = null; @@ -197,8 +257,8 @@ public Database getDatabase(String dbName) throws MetaException{ + " with param [" + params[0] + "]"); } - @SuppressWarnings("unchecked") - List sqlResult = (List)queryDbSelector.executeWithArray(params); + List sqlResult = executeWithArray( + queryDbSelector, params, queryTextDbSelector); if ((sqlResult == null) || sqlResult.isEmpty()) { return null; } @@ -209,7 +269,7 @@ public Database getDatabase(String dbName) throws MetaException{ } Object[] dbline = sqlResult.get(0); - Long dbid = StatObjectConverter.extractSqlLong(dbline[0]); + Long dbid = extractSqlLong(dbline[0]); String queryTextDbParams = "select \"PARAM_KEY\", \"PARAM_VALUE\" " + " FROM \"DATABASE_PARAMS\" " @@ -223,10 +283,11 @@ public Database getDatabase(String dbName) throws MetaException{ } Map dbParams = new HashMap(); - List sqlResult2 = ensureList(queryDbParams.executeWithArray(params)); + List sqlResult2 = ensureList(executeWithArray( + queryDbParams, params, queryTextDbParams)); if (!sqlResult2.isEmpty()) { for (Object[] line : sqlResult2) { - dbParams.put(extractSqlString(line[0]),extractSqlString(line[1])); + dbParams.put(extractSqlString(line[0]), extractSqlString(line[1])); } } Database db = new Database(); @@ -256,20 +317,20 @@ public Database getDatabase(String dbName) throws MetaException{ /** * Gets partitions by using direct SQL queries. 
+ * Note that batching is not needed for this method - list of names implies the batch size; * @param dbName Metastore db name. * @param tblName Metastore table name. * @param partNames Partition names to get. - * @param max The maximum number of partitions to return. * @return List of partitions. */ public List getPartitionsViaSqlFilter( - String dbName, String tblName, List partNames, Integer max) throws MetaException { + String dbName, String tblName, List partNames) throws MetaException { if (partNames.isEmpty()) { return new ArrayList(); } return getPartitionsViaSqlFilterInternal(dbName, tblName, null, "\"PARTITIONS\".\"PART_NAME\" in (" + makeParams(partNames.size()) + ")", - partNames, new ArrayList(), max); + partNames, new ArrayList(), null); } /** @@ -284,12 +345,16 @@ public Database getDatabase(String dbName) throws MetaException{ assert tree != null; List params = new ArrayList(); List joins = new ArrayList(); - String sqlFilter = PartitionFilterGenerator.generateSqlFilter(table, tree, params, joins); + // Derby and Oracle do not interpret filters ANSI-properly in some cases and need a workaround. + boolean dbHasJoinCastBug = (dbType == DB.DERBY || dbType == DB.ORACLE); + String sqlFilter = PartitionFilterGenerator.generateSqlFilter( + table, tree, params, joins, dbHasJoinCastBug); if (sqlFilter == null) { return null; // Cannot make SQL filter to push down. } + Boolean isViewTable = isViewTable(table); return getPartitionsViaSqlFilterInternal(table.getDbName(), table.getTableName(), - isViewTable(table), sqlFilter, params, joins, max); + isViewTable, sqlFilter, params, joins, max); } /** @@ -317,7 +382,7 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException Object[] params = new Object[] { tblName, dbName }; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); query.setUnique(true); - Object result = query.executeWithArray(params); + Object result = executeWithArray(query, params, queryText); return (result != null) && result.toString().equals(TableType.VIRTUAL_VIEW.toString()); } @@ -374,26 +439,46 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException if (max != null) { query.setRange(0, max.shortValue()); } - @SuppressWarnings("unchecked") - List sqlResult = (List)query.executeWithArray(params); + List sqlResult = executeWithArray(query, params, queryText); long queryTime = doTrace ? System.nanoTime() : 0; if (sqlResult.isEmpty()) { timingTrace(doTrace, queryText, start, queryTime); return new ArrayList(); // no partitions, bail early. } + // Get full objects. For Oracle, do it in batches. 
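+ // batchSize comes from METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE; the DETECT_BATCHING default
+ // resolves to 1000 on Oracle and MSSQL and to NO_BATCHING elsewhere (see the constructor),
+ // presumably so the "PART_ID in (...)" list built below stays under Oracle's 1000-expression
+ // IN-list limit (ORA-01795) and stays reasonable on SQL Server.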
+ List result = null; + if (batchSize != NO_BATCHING && batchSize < sqlResult.size()) { + result = new ArrayList(sqlResult.size()); + while (result.size() < sqlResult.size()) { + int toIndex = Math.min(result.size() + batchSize, sqlResult.size()); + List batchedSqlResult = sqlResult.subList(result.size(), toIndex); + result.addAll(getPartitionsFromPartitionIds(dbName, tblName, isView, batchedSqlResult)); + } + } else { + result = getPartitionsFromPartitionIds(dbName, tblName, isView, sqlResult); + } + + timingTrace(doTrace, queryText, start, queryTime); + query.closeAll(); + return result; + } + + private List getPartitionsFromPartitionIds(String dbName, String tblName, + Boolean isView, List partIdList) throws MetaException { + boolean doTrace = LOG.isDebugEnabled(); + int idStringWidth = (int)Math.ceil(Math.log10(partIdList.size())) + 1; // 1 for comma + int sbCapacity = partIdList.size() * idStringWidth; // Prepare StringBuilder for "PART_ID in (...)" to use in future queries. - int sbCapacity = sqlResult.size() * 7; // if there are 100k things => 6 chars, plus comma StringBuilder partSb = new StringBuilder(sbCapacity); - // Assume db and table names are the same for all partition, that's what we're selecting for. - for (Object partitionId : sqlResult) { - partSb.append(StatObjectConverter.extractSqlLong(partitionId)).append(","); + for (Object partitionId : partIdList) { + partSb.append(extractSqlLong(partitionId)).append(","); } String partIds = trimCommaList(partSb); - timingTrace(doTrace, queryText, start, queryTime); - // Now get most of the other fields. - queryText = + // Get most of the fields for the IDs provided. + // Assume db and table names are the same for all partition, as provided in arguments. + String queryText = "select \"PARTITIONS\".\"PART_ID\", \"SDS\".\"SD_ID\", \"SDS\".\"CD_ID\"," + " \"SERDES\".\"SERDE_ID\", \"PARTITIONS\".\"CREATE_TIME\"," + " \"PARTITIONS\".\"LAST_ACCESS_TIME\", \"SDS\".\"INPUT_FORMAT\", \"SDS\".\"IS_COMPRESSED\"," @@ -403,11 +488,11 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException + " left outer join \"SDS\" on \"PARTITIONS\".\"SD_ID\" = \"SDS\".\"SD_ID\" " + " left outer join \"SERDES\" on \"SDS\".\"SERDE_ID\" = \"SERDES\".\"SERDE_ID\" " + "where \"PART_ID\" in (" + partIds + ") order by \"PART_NAME\" asc"; - start = doTrace ? System.nanoTime() : 0; - query = pm.newQuery("javax.jdo.query.SQL", queryText); + long start = doTrace ? System.nanoTime() : 0; + Query query = pm.newQuery("javax.jdo.query.SQL", queryText); @SuppressWarnings("unchecked") - List sqlResult2 = (List)query.executeWithArray(params); - queryTime = doTrace ? System.nanoTime() : 0; + List sqlResult = executeWithArray(query, null, queryText); + long queryTime = doTrace ? System.nanoTime() : 0; // Read all the fields and create partitions, SDs and serdes. TreeMap partitions = new TreeMap(); @@ -415,19 +500,19 @@ private boolean isViewTable(String dbName, String tblName) throws MetaException TreeMap serdes = new TreeMap(); TreeMap> colss = new TreeMap>(); // Keep order by name, consistent with JDO. - ArrayList orderedResult = new ArrayList(sqlResult.size()); + ArrayList orderedResult = new ArrayList(partIdList.size()); // Prepare StringBuilder-s for "in (...)" lists to use in one-to-many queries. StringBuilder sdSb = new StringBuilder(sbCapacity), serdeSb = new StringBuilder(sbCapacity); StringBuilder colsSb = new StringBuilder(7); // We expect that there's only one field schema. 
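+ // The single pass below turns the flat SQL rows into Partition/StorageDescriptor/SerDe shells
+ // keyed by their ids, while sdSb/serdeSb/colsSb collect the id lists that the later one-to-many
+ // queries (params, columns, serde info, skewed values, etc.) plug into their IN clauses.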
tblName = tblName.toLowerCase(); dbName = dbName.toLowerCase(); - for (Object[] fields : sqlResult2) { + for (Object[] fields : sqlResult) { // Here comes the ugly part... - long partitionId = StatObjectConverter.extractSqlLong(fields[0]); - Long sdId = StatObjectConverter.extractSqlLong(fields[1]); - Long colId = StatObjectConverter.extractSqlLong(fields[2]); - Long serdeId = StatObjectConverter.extractSqlLong(fields[3]); + long partitionId = extractSqlLong(fields[0]); + Long sdId = extractSqlLong(fields[1]); + Long colId = extractSqlLong(fields[2]); + Long serdeId = extractSqlLong(fields[3]); // A partition must have either everything set, or nothing set if it's a view. if (sdId == null || colId == null || serdeId == null) { if (isView == null) { @@ -596,7 +681,7 @@ public void apply(StorageDescriptor t, Object[] fields) throws MetaException { currentListId = null; t.getSkewedInfo().addToSkewedColValues(new ArrayList()); } else { - long fieldsListId = StatObjectConverter.extractSqlLong(fields[1]); + long fieldsListId = extractSqlLong(fields[1]); if (currentListId == null || fieldsListId != currentListId) { currentList = new ArrayList(); currentListId = fieldsListId; @@ -638,7 +723,7 @@ public void apply(StorageDescriptor t, Object[] fields) throws MetaException { currentList = new ArrayList(); // left outer join produced a list with no values currentListId = null; } else { - long fieldsListId = StatObjectConverter.extractSqlLong(fields[1]); + long fieldsListId = extractSqlLong(fields[1]); if (currentListId == null || fieldsListId != currentListId) { currentList = new ArrayList(); currentListId = fieldsListId; @@ -683,6 +768,14 @@ private void timingTrace(boolean doTrace, String queryText, long start, long que (System.nanoTime() - queryTime) / 1000000.0 + "ms, the query is [" + queryText + "]"); } + static Long extractSqlLong(Object obj) throws MetaException { + if (obj == null) return null; + if (!(obj instanceof Number)) { + throw new MetaException("Expected numeric type but got " + obj.getClass().getName()); + } + return ((Number)obj).longValue(); + } + private static Boolean extractSqlBoolean(Object value) throws MetaException { // MySQL has booleans, but e.g. Derby uses 'Y'/'N' mapping. People using derby probably // don't care about performance anyway, but let's cover the common case. 
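The extractSqlLong() helper that replaces the StatObjectConverter calls throughout this file exists because JDBC drivers disagree about which Number subclass a BIGINT/NUMBER column comes back as (Oracle's driver typically returns java.math.BigDecimal, Derby a java.lang.Long); the driver behaviours named here are general JDBC knowledge, not something this patch states. A tiny sketch of the normalization it performs:

// Illustrative only (not from the patch): why a Number-based extraction is needed.
Object fromOracle = new java.math.BigDecimal(42);  // NUMBER column via the Oracle driver
Object fromDerby  = Long.valueOf(42L);             // BIGINT column via the Derby driver
long a = ((Number) fromOracle).longValue();        // 42 - same normalization extractSqlLong applies
long b = ((Number) fromDerby).longValue();         // 42
assert a == b;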
@@ -749,7 +842,7 @@ private static String trimCommaList(StringBuilder sb) { if (fields == null) { fields = iter.next(); } - long nestedId = StatObjectConverter.extractSqlLong(fields[keyIndex]); + long nestedId = extractSqlLong(fields[keyIndex]); if (nestedId < id) throw new MetaException("Found entries for unknown ID " + nestedId); if (nestedId > id) break; // fields belong to one of the next entries func.apply(entry.getValue(), fields); @@ -767,12 +860,14 @@ private static String trimCommaList(StringBuilder sb) { private final FilterBuilder filterBuffer; private final List params; private final List joins; + private final boolean dbHasJoinCastBug; private PartitionFilterGenerator( - Table table, List params, List joins) { + Table table, List params, List joins, boolean dbHasJoinCastBug) { this.table = table; this.params = params; this.joins = joins; + this.dbHasJoinCastBug = dbHasJoinCastBug; this.filterBuffer = new FilterBuilder(false); } @@ -783,13 +878,14 @@ private PartitionFilterGenerator( * @param joins the joins necessary for the resulting expression * @return the string representation of the expression tree */ - public static String generateSqlFilter(Table table, - ExpressionTree tree, List params, List joins) throws MetaException { + private static String generateSqlFilter(Table table, ExpressionTree tree, + List params, List joins, boolean dbHasJoinCastBug) throws MetaException { assert table != null; if (tree.getRoot() == null) { return ""; } - PartitionFilterGenerator visitor = new PartitionFilterGenerator(table, params, joins); + PartitionFilterGenerator visitor = new PartitionFilterGenerator( + table, params, joins, dbHasJoinCastBug); tree.accept(visitor); if (visitor.filterBuffer.hasError()) { LOG.info("Unable to push down SQL filter: " + visitor.filterBuffer.getErrorMessage()); @@ -928,11 +1024,15 @@ public void visit(LeafNode node) throws MetaException { tableValue = "cast(" + tableValue + " as date)"; } - // This is a workaround for DERBY-6358; as such, it is pretty horrible. - tableValue = "(case when \"TBLS\".\"TBL_NAME\" = ? and \"DBS\".\"NAME\" = ? then " - + tableValue + " else null end)"; - params.add(table.getTableName().toLowerCase()); - params.add(table.getDbName().toLowerCase()); + if (dbHasJoinCastBug) { + // This is a workaround for DERBY-6358 and Oracle bug; it is pretty horrible. + tableValue = "(case when \"TBLS\".\"TBL_NAME\" = ? and \"DBS\".\"NAME\" = ? and " + + "\"FILTER" + partColIndex + "\".\"PART_ID\" = \"PARTITIONS\".\"PART_ID\" and " + + "\"FILTER" + partColIndex + "\".\"INTEGER_IDX\" = " + partColIndex + " then " + + tableValue + " else null end)"; + params.add(table.getTableName().toLowerCase()); + params.add(table.getDbName().toLowerCase()); + } } if (!node.isReverseOrder) { params.add(nodeValue); @@ -961,7 +1061,7 @@ public ColumnStatistics getTableStats( for (int i = 0; i < colNames.size(); ++i) { params[i + 2] = colNames.get(i); } - Object qResult = query.executeWithArray(params); + Object qResult = executeWithArray(query, params, queryText); long queryTime = doTrace ? 
System.nanoTime() : 0; if (qResult == null) { query.closeAll(); @@ -978,8 +1078,7 @@ public ColumnStatistics getTableStats( public AggrStats aggrColStatsForPartitions(String dbName, String tableName, List partNames, List colNames) throws MetaException { - long partsFound = partsFoundForPartitions(dbName, tableName, partNames, - colNames); + long partsFound = partsFoundForPartitions(dbName, tableName, partNames, colNames); List stats = columnStatisticsObjForPartitions(dbName, tableName, partNames, colNames, partsFound); return new AggrStats(stats, partsFound); @@ -989,21 +1088,21 @@ private long partsFoundForPartitions(String dbName, String tableName, List partNames, List colNames) throws MetaException { long partsFound = 0; boolean doTrace = LOG.isDebugEnabled(); - String qText = "select count(\"COLUMN_NAME\") from \"PART_COL_STATS\"" + String queryText = "select count(\"COLUMN_NAME\") from \"PART_COL_STATS\"" + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " group by \"PARTITION_NAME\""; long start = doTrace ? System.nanoTime() : 0; - Query query = pm.newQuery("javax.jdo.query.SQL", qText); - Object qResult = query.executeWithArray(prepareParams(dbName, tableName, - partNames, colNames)); + Query query = pm.newQuery("javax.jdo.query.SQL", queryText); + Object qResult = executeWithArray(query, prepareParams( + dbName, tableName, partNames, colNames), queryText); long end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, qText, start, end); + timingTrace(doTrace, queryText, start, end); ForwardQueryResult fqr = (ForwardQueryResult) qResult; Iterator iter = fqr.iterator(); while (iter.hasNext()) { - if (StatObjectConverter.extractSqlLong(iter.next()) == colNames.size()) { + if (extractSqlLong(iter.next()) == colNames.size()) { partsFound++; } } @@ -1013,12 +1112,14 @@ private long partsFoundForPartitions(String dbName, String tableName, private List columnStatisticsObjForPartitions( String dbName, String tableName, List partNames, List colNames, long partsFound) throws MetaException { + // TODO: all the extrapolation logic should be moved out of this class, + // only mechanical data retrieval should remain here. String commonPrefix = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", " + "min(\"LONG_LOW_VALUE\"), max(\"LONG_HIGH_VALUE\"), min(\"DOUBLE_LOW_VALUE\"), max(\"DOUBLE_HIGH_VALUE\"), " + "min(\"BIG_DECIMAL_LOW_VALUE\"), max(\"BIG_DECIMAL_HIGH_VALUE\"), sum(\"NUM_NULLS\"), max(\"NUM_DISTINCTS\"), " + "max(\"AVG_COL_LEN\"), max(\"MAX_COL_LEN\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\") from \"PART_COL_STATS\"" + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "; - String qText = null; + String queryText = null; long start = 0; long end = 0; Query query = null; @@ -1028,20 +1129,20 @@ private long partsFoundForPartitions(String dbName, String tableName, // Check if the status of all the columns of all the partitions exists // Extrapolation is not needed. if (partsFound == partNames.size()) { - qText = commonPrefix + queryText = commonPrefix + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; start = doTrace ? 
System.nanoTime() : 0; - query = pm.newQuery("javax.jdo.query.SQL", qText); - qResult = query.executeWithArray(prepareParams(dbName, tableName, - partNames, colNames)); + query = pm.newQuery("javax.jdo.query.SQL", queryText); + qResult = executeWithArray(query, prepareParams( + dbName, tableName, partNames, colNames), queryText); if (qResult == null) { query.closeAll(); return Lists.newArrayList(); } end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, qText, start, end); + timingTrace(doTrace, queryText, start, end); List list = ensureList(qResult); List colStats = new ArrayList( list.size()); @@ -1056,18 +1157,18 @@ private long partsFoundForPartitions(String dbName, String tableName, // We need to extrapolate this partition based on the other partitions List colStats = new ArrayList( colNames.size()); - qText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", count(\"PARTITION_NAME\") " + queryText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", count(\"PARTITION_NAME\") " + " from \"PART_COL_STATS\"" + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; start = doTrace ? System.nanoTime() : 0; - query = pm.newQuery("javax.jdo.query.SQL", qText); - qResult = query.executeWithArray(prepareParams(dbName, tableName, - partNames, colNames)); + query = pm.newQuery("javax.jdo.query.SQL", queryText); + qResult = executeWithArray(query, prepareParams( + dbName, tableName, partNames, colNames), queryText); end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, qText, start, end); + timingTrace(doTrace, queryText, start, end); if (qResult == null) { query.closeAll(); return Lists.newArrayList(); @@ -1082,7 +1183,7 @@ private long partsFoundForPartitions(String dbName, String tableName, // count(\"PARTITION_NAME\")==partNames.size() // Or, extrapolation is not possible for this column if // count(\"PARTITION_NAME\")<2 - Long count = StatObjectConverter.extractSqlLong(row[2]); + Long count = extractSqlLong(row[2]); if (count == partNames.size() || count < 2) { noExtraColumnNames.add(colName); } else { @@ -1092,14 +1193,14 @@ private long partsFoundForPartitions(String dbName, String tableName, query.closeAll(); // Extrapolation is not needed for columns noExtraColumnNames if (noExtraColumnNames.size() != 0) { - qText = commonPrefix + queryText = commonPrefix + " and \"COLUMN_NAME\" in ("+ makeParams(noExtraColumnNames.size()) + ")" + " and \"PARTITION_NAME\" in ("+ makeParams(partNames.size()) +")" + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; start = doTrace ? System.nanoTime() : 0; - query = pm.newQuery("javax.jdo.query.SQL", qText); - qResult = query.executeWithArray(prepareParams(dbName, tableName, - partNames, noExtraColumnNames)); + query = pm.newQuery("javax.jdo.query.SQL", queryText); + qResult = executeWithArray(query, prepareParams( + dbName, tableName, partNames, noExtraColumnNames), queryText); if (qResult == null) { query.closeAll(); return Lists.newArrayList(); @@ -1109,7 +1210,7 @@ private long partsFoundForPartitions(String dbName, String tableName, colStats.add(prepareCSObj(row, 0)); } end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, qText, start, end); + timingTrace(doTrace, queryText, start, end); query.closeAll(); } // Extrapolation is needed for extraColumnNames. 
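At this point columnStatisticsObjForPartitions has split the requested columns into those whose statistics exist on every selected partition (aggregated directly above) and those that need extrapolation (handled in the hunk below). For additive statistics such as NUM_NULLS, NUM_TRUES and NUM_FALSES the code scales the observed sum up by the share of partitions that actually have stats, while min/max-style statistics are extrapolated from the ordered borders. A rough sketch of the additive case, with invented names; the real code does the arithmetic on Longs:

// Proportional scale-up for additive column stats (sketch, not the patch's exact code).
// observedSum   - sum over the partitions that do have statistics for this column
// partsWithStat - how many partitions have statistics for it
// totalParts    - how many partitions the caller asked about
static long extrapolateSum(long observedSum, long partsWithStat, int totalParts) {
  return (long) ((double) observedSum / partsWithStat * totalParts);
}

This mirrors the val / sumVal * partNames.size() expression in the hunk below.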
@@ -1121,18 +1222,18 @@ private long partsFoundForPartitions(String dbName, String tableName, } // get sum for all columns to reduce the number of queries Map> sumMap = new HashMap>(); - qText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\")" + queryText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\")" + " from \"PART_COL_STATS\"" + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + " and \"COLUMN_NAME\" in (" +makeParams(extraColumnNameTypeParts.size())+ ")" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\""; start = doTrace ? System.nanoTime() : 0; - query = pm.newQuery("javax.jdo.query.SQL", qText); + query = pm.newQuery("javax.jdo.query.SQL", queryText); List extraColumnNames = new ArrayList(); extraColumnNames.addAll(extraColumnNameTypeParts.keySet()); - qResult = query.executeWithArray(prepareParams(dbName, tableName, - partNames, extraColumnNames)); + qResult = executeWithArray(query, prepareParams( + dbName, tableName, partNames, extraColumnNames), queryText); if (qResult == null) { query.closeAll(); return Lists.newArrayList(); @@ -1148,7 +1249,7 @@ private long partsFoundForPartitions(String dbName, String tableName, sumMap.put((String) row[0], indexToObject); } end = doTrace ? System.nanoTime() : 0; - timingTrace(doTrace, qText, start, end); + timingTrace(doTrace, queryText, start, end); query.closeAll(); for (Map.Entry entry : extraColumnNameTypeParts .entrySet()) { @@ -1177,23 +1278,23 @@ private long partsFoundForPartitions(String dbName, String tableName, if (o == null) { row[2 + colStatIndex] = null; } else { - Long val = StatObjectConverter.extractSqlLong(o); + Long val = extractSqlLong(o); row[2 + colStatIndex] = (Long) (val / sumVal * (partNames.size())); } } else { // if the aggregation type is min/max, we extrapolate from the // left/right borders - qText = "select \"" + queryText = "select \"" + colStatName + "\",\"PARTITION_NAME\" from \"PART_COL_STATS\"" + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" - + " and \"COLUMN_NAME\" in (" +makeParams(1)+ ")" + + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + " order by \'" + colStatName + "\'"; start = doTrace ? System.nanoTime() : 0; - query = pm.newQuery("javax.jdo.query.SQL", qText); - qResult = query.executeWithArray(prepareParams(dbName, - tableName, partNames, Arrays.asList(colName))); + query = pm.newQuery("javax.jdo.query.SQL", queryText); + qResult = executeWithArray(query, prepareParams( + dbName, tableName, partNames, Arrays.asList(colName)), queryText); if (qResult == null) { query.closeAll(); return Lists.newArrayList(); @@ -1202,7 +1303,7 @@ private long partsFoundForPartitions(String dbName, String tableName, Object[] min = (Object[]) (fqr.get(0)); Object[] max = (Object[]) (fqr.get(fqr.size() - 1)); end = doTrace ? 
System.nanoTime() : 0; - timingTrace(doTrace, qText, start, end); + timingTrace(doTrace, queryText, start, end); query.closeAll(); if (min[0] == null || max[0] == null) { row[2 + colStatIndex] = null; @@ -1260,7 +1361,8 @@ private ColumnStatisticsObj prepareCSObj (Object[] row, int i) throws MetaExcept + makeParams(partNames.size()) + ") order by \"PARTITION_NAME\""; Query query = pm.newQuery("javax.jdo.query.SQL", queryText); - Object qResult = query.executeWithArray(prepareParams(dbName, tableName, partNames, colNames)); + Object qResult = executeWithArray(query, prepareParams( + dbName, tableName, partNames, colNames), queryText); long queryTime = doTrace ? System.nanoTime() : 0; if (qResult == null) { query.closeAll(); @@ -1306,8 +1408,8 @@ private ColumnStatistics makeColumnStats( // LastAnalyzed is stored per column but thrift has it per several; // get the lowest for now as nobody actually uses this field. Object laObj = row[offset + 14]; - if (laObj != null && (!csd.isSetLastAnalyzed() || csd.getLastAnalyzed() > StatObjectConverter.extractSqlLong(laObj))) { - csd.setLastAnalyzed(StatObjectConverter.extractSqlLong(laObj)); + if (laObj != null && (!csd.isSetLastAnalyzed() || csd.getLastAnalyzed() > extractSqlLong(laObj))) { + csd.setLastAnalyzed(extractSqlLong(laObj)); } csos.add(prepareCSObj(row, offset)); } @@ -1327,4 +1429,23 @@ private String makeParams(int size) { // W/ size 0, query will fail, but at least we'd get to see the query in debug output. return (size == 0) ? "" : repeat(",?", size).substring(1); } + + @SuppressWarnings("unchecked") + private T executeWithArray(Query query, Object[] params, String sql) throws MetaException { + try { + return (T)((params == null) ? query.execute() : query.executeWithArray(params)); + } catch (Exception ex) { + String error = "Failed to execute [" + sql + "] with parameters ["; + if (params != null) { + boolean isFirst = true; + for (Object param : params) { + error += (isFirst ? "" : ", ") + param; + isFirst = false; + } + } + LOG.warn(error + "]", ex); + // We just logged an exception with (in case of JDO) a humongous callstack. Make a new one. + throw new MetaException("See previous errors; " + ex.getMessage()); + } + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java new file mode 100644 index 0000000..51f63ad --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import java.util.List; + +import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; +import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionSpec; +import org.apache.hadoop.hive.metastore.api.Table; + +/** + * Metadata filter hook for metastore client. This will be useful for authorization + * plugins on hiveserver2 to filter metadata results, especially in case of + * non-impersonation mode where the metastore doesn't know the end user's identity. + */ +@LimitedPrivate(value = { "Apache Sentry (Incubating)" }) +@Evolving +public interface MetaStoreFilterHook { + + /** + * Filter given list of databases + * @param dbList + * @return List of filtered Db names + */ + public List filterDatabases(List dbList); + + /** + * filter to given database object if applicable + * @param dataBase + * @return the same database if it's not filtered out + * @throws NoSuchObjectException + */ + public Database filterDatabase(Database dataBase) throws NoSuchObjectException; + + /** + * Filter given list of tables + * @param dbName + * @param tableList + * @returnList of filtered table names + */ + public List filterTableNames(String dbName, List tableList); + + /** + * filter to given table object if applicable + * @param table + * @return the same table if it's not filtered out + * @throws NoSuchObjectException + */ + public Table filterTable(Table table) throws NoSuchObjectException; + + /** + * Filter given list of tables + * @param dbName + * @param tableList + * @returnList of filtered table names + */ + public List
<Table> filterTables(List<Table>
tableList); + + /** + * Filter given list of partitions + * @param partitionList + * @return + */ + public List filterPartitions(List partitionList); + + /** + * Filter given list of partition specs + * @param partitionSpecList + * @return + */ + public List filterPartitionSpecs(List partitionSpecList); + + /** + * filter to given partition object if applicable + * @param partition + * @return the same partition object if it's not filtered out + * @throws NoSuchObjectException + */ + public Partition filterPartition(Partition partition) throws NoSuchObjectException; + + /** + * Filter given list of partition names + * @param dbName + * @param tblName + * @param partitionNames + * @return + */ + public List filterPartitionNames(String dbName, String tblName, + List partitionNames); + + public Index filterIndex(Index index) throws NoSuchObjectException; + + /** + * Filter given list of index names + * @param dbName + * @param tblName + * @param indexList + * @return + */ + public List filterIndexNames(String dbName, String tblName, + List indexList); + + /** + * Filter given list of index objects + * @param indexeList + * @return + */ + public List filterIndexes(List indexeList); +} + diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java index ff265e5..a0c8d3b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java @@ -20,6 +20,8 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; +import java.util.concurrent.atomic.AtomicBoolean; + /** * A thread that runs in the metastore, separate from the threads in the thrift service. */ @@ -49,21 +51,12 @@ * thread should then assure that the loop has been gone completely through at * least once. */ - void init(BooleanPointer stop, BooleanPointer looped) throws MetaException; + void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException; /** * Run the thread in the background. This must not be called until - * {@link #init(org.apache.hadoop.hive.metastore.MetaStoreThread.BooleanPointer)} has + * {@link ##init(java.util.concurrent.atomic.AtomicBoolean, java.util.concurrent.atomic.AtomicBoolean)} has * been called. 
*/ void start(); - - class BooleanPointer { - public boolean boolVal; - - public BooleanPointer() { - boolVal = false; - } - } - } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 2aa5d20..2a42a17 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -132,6 +132,8 @@ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; import org.apache.hadoop.hive.metastore.parser.FilterLexer; import org.apache.hadoop.hive.metastore.parser.FilterParser; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.util.StringUtils; @@ -265,7 +267,7 @@ private void initialize(Properties dsProps) { isInitialized = pm != null; if (isInitialized) { expressionProxy = createExpressionProxy(hiveConf); - directSql = new MetaStoreDirectSql(pm); + directSql = new MetaStoreDirectSql(pm, hiveConf); } LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm + " created in the thread with id: " + Thread.currentThread().getId()); @@ -1999,7 +2001,7 @@ private Collection getPartitionPsQueryResults(String dbName, String tableName, return new GetListHelper(dbName, tblName, allowSql, allowJdo) { @Override protected List getSqlResult(GetHelper> ctx) throws MetaException { - return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames, null); + return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames); } @Override protected List getJdoResult( @@ -2052,7 +2054,7 @@ protected boolean getPartitionsByExprInternal(String dbName, String tblName, fin List partNames = new LinkedList(); hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn( ctx.getTable(), expr, defaultPartitionName, maxParts, partNames)); - result = directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames, null); + result = directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames); } return result; } @@ -2136,14 +2138,16 @@ private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr, result.addAll(getPartitionNamesNoTxn( table.getDbName(), table.getTableName(), maxParts)); List columnNames = new ArrayList(); + List typeInfos = new ArrayList(); for (FieldSchema fs : table.getPartitionKeys()) { columnNames.add(fs.getName()); + typeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(fs.getType())); } if (defaultPartName == null || defaultPartName.isEmpty()) { defaultPartName = HiveConf.getVar(getConf(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); } return expressionProxy.filterPartitionsByExpr( - columnNames, expr, defaultPartName, result); + columnNames, typeInfos, expr, defaultPartName, result); } /** diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java index 0787775..5195481 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/PartitionExpressionProxy.java @@ -21,6 +21,7 @@ import java.util.List; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; /** * The proxy interface 
that metastore uses to manipulate and apply @@ -37,12 +38,14 @@ /** * Filters the partition names via serialized Hive expression. - * @param columnNames Partition column names in the underlying table. + * @param partColumnNames Partition column names in the underlying table. + * @param partColumnTypeInfos Partition column types in the underlying table * @param expr Serialized expression. * @param defaultPartitionName Default partition name from job or server configuration. * @param partitionNames Partition names; the list is modified in place. * @return Whether there were any unknown partitions preserved in the name list. */ - public boolean filterPartitionsByExpr(List columnNames, byte[] expr, + public boolean filterPartitionsByExpr(List partColumnNames, + List partColumnTypeInfos, byte[] expr, String defaultPartitionName, List partitionNames) throws MetaException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java index b18eb21..01ad36a 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.datanucleus.exceptions.NucleusException; @InterfaceAudience.Private @InterfaceStability.Evolving @@ -132,8 +133,9 @@ public Object invoke(final Object proxy, final Method method, final Object[] arg } throw e.getCause(); } else if (e.getCause() instanceof MetaException && e.getCause().getCause() != null - && e.getCause().getCause() instanceof javax.jdo.JDOException) { - // The JDOException may be wrapped further in a MetaException + && (e.getCause().getCause() instanceof javax.jdo.JDOException || + e.getCause().getCause() instanceof NucleusException)) { + // The JDOException or the Nucleus Exception may be wrapped further in a MetaException caughtException = e.getCause().getCause(); } else { LOG.error(ExceptionUtils.getStackTrace(e.getCause())); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java b/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index 0f99cf3..475883b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -420,58 +420,58 @@ public static void fillColumnStatisticsData(String colType, ColumnStatisticsData colType = colType.toLowerCase(); if (colType.equals("boolean")) { BooleanColumnStatsData boolStats = new BooleanColumnStatsData(); - boolStats.setNumFalses(extractSqlLong(falses)); - boolStats.setNumTrues(extractSqlLong(trues)); - boolStats.setNumNulls(extractSqlLong(nulls)); + boolStats.setNumFalses(MetaStoreDirectSql.extractSqlLong(falses)); + boolStats.setNumTrues(MetaStoreDirectSql.extractSqlLong(trues)); + boolStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); data.setBooleanStats(boolStats); } else if (colType.equals("string") || colType.startsWith("varchar") || colType.startsWith("char")) { StringColumnStatsData stringStats = new StringColumnStatsData(); - stringStats.setNumNulls(extractSqlLong(nulls)); + stringStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); stringStats.setAvgColLen((Double)avglen); - 
stringStats.setMaxColLen(extractSqlLong(maxlen)); - stringStats.setNumDVs(extractSqlLong(dist)); + stringStats.setMaxColLen(MetaStoreDirectSql.extractSqlLong(maxlen)); + stringStats.setNumDVs(MetaStoreDirectSql.extractSqlLong(dist)); data.setStringStats(stringStats); } else if (colType.equals("binary")) { BinaryColumnStatsData binaryStats = new BinaryColumnStatsData(); - binaryStats.setNumNulls(extractSqlLong(nulls)); + binaryStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); binaryStats.setAvgColLen((Double)avglen); - binaryStats.setMaxColLen(extractSqlLong(maxlen)); + binaryStats.setMaxColLen(MetaStoreDirectSql.extractSqlLong(maxlen)); data.setBinaryStats(binaryStats); } else if (colType.equals("bigint") || colType.equals("int") || colType.equals("smallint") || colType.equals("tinyint") || colType.equals("timestamp")) { LongColumnStatsData longStats = new LongColumnStatsData(); - longStats.setNumNulls(extractSqlLong(nulls)); + longStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); if (lhigh != null) { - longStats.setHighValue(extractSqlLong(lhigh)); + longStats.setHighValue(MetaStoreDirectSql.extractSqlLong(lhigh)); } if (llow != null) { - longStats.setLowValue(extractSqlLong(llow)); + longStats.setLowValue(MetaStoreDirectSql.extractSqlLong(llow)); } - longStats.setNumDVs(extractSqlLong(dist)); + longStats.setNumDVs(MetaStoreDirectSql.extractSqlLong(dist)); data.setLongStats(longStats); } else if (colType.equals("double") || colType.equals("float")) { DoubleColumnStatsData doubleStats = new DoubleColumnStatsData(); - doubleStats.setNumNulls(extractSqlLong(nulls)); + doubleStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); if (dhigh != null) { doubleStats.setHighValue((Double)dhigh); } if (dlow != null) { doubleStats.setLowValue((Double)dlow); } - doubleStats.setNumDVs(extractSqlLong(dist)); + doubleStats.setNumDVs(MetaStoreDirectSql.extractSqlLong(dist)); data.setDoubleStats(doubleStats); } else if (colType.startsWith("decimal")) { DecimalColumnStatsData decimalStats = new DecimalColumnStatsData(); - decimalStats.setNumNulls(extractSqlLong(nulls)); + decimalStats.setNumNulls(MetaStoreDirectSql.extractSqlLong(nulls)); if (dechigh != null) { decimalStats.setHighValue(createThriftDecimal((String)dechigh)); } if (declow != null) { decimalStats.setLowValue(createThriftDecimal((String)declow)); } - decimalStats.setNumDVs(extractSqlLong(dist)); + decimalStats.setNumDVs(MetaStoreDirectSql.extractSqlLong(dist)); data.setDecimalStats(decimalStats); } } @@ -484,12 +484,4 @@ private static Decimal createThriftDecimal(String s) { private static String createJdoDecimalString(Decimal d) { return new BigDecimal(new BigInteger(d.getUnscaled()), d.getScale()).toString(); } - - static Long extractSqlLong(Object obj) throws MetaException { - if (obj == null) return null; - if (!(obj instanceof Number)) { - throw new MetaException("Expected numeric type but got " + obj.getClass().getName()); - } - return ((Number)obj).longValue(); - } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java index b8d1afc..60b041b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java @@ -367,20 +367,8 @@ private void generateJDOFilterOverPartitions(Configuration conf, Table table, partitionColumnIndex, partitionColumnCount, isOpEquals, filterBuilder); return; 
} - - String keyEqual = FileUtils.escapePathName(keyName) + "="; - String valString = "partitionName.substring("; - String indexOfKeyStr = ""; - if (partitionColumnIndex != 0) { - keyEqual = "/" + keyEqual; - indexOfKeyStr = "partitionName.indexOf(\"" + keyEqual + "\") + "; - valString += indexOfKeyStr; - } - valString += keyEqual.length(); - if (partitionColumnIndex != (partitionColumnCount - 1)) { - valString += ", partitionName.concat(\"/\").indexOf(\"/\", " + indexOfKeyStr + keyEqual.length() + ")"; - } - valString += ")"; + //get the value for a partition key form MPartition.values (PARTITION_KEY_VALUES) + String valString = "values.get(" + partitionColumnIndex + ")"; if (operator == Operator.LIKE) { if (isReverseOrder) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java index 7e08cd9..6a1b315 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/CompositePartitionSpecProxy.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.metastore.partition.spec; import org.apache.commons.logging.Log; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java index 9bdb142..154011e 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionListComposingSpecProxy.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.metastore.partition.spec; import org.apache.hadoop.hive.metastore.api.MetaException; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java index 12f9e1d..fdb0867 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecProxy.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.metastore.partition.spec; import org.apache.hadoop.hive.metastore.api.MetaException; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java index e1960ed..6a3e147 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/partition/spec/PartitionSpecWithSharedSDProxy.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.metastore.partition.spec; import org.apache.hadoop.hive.metastore.api.MetaException; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 50f58d0..04e65ea 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -911,8 +911,9 @@ protected void detectDeadlock(Connection conn, // If you change this function, remove the @Ignore from TestTxnHandler.deadlockIsDetected() // to test these changes. // MySQL and MSSQL use 40001 as the state code for rollback. Postgres uses 40001 and 40P01. 
- // Oracle seems to return different SQLStates each time, but the message always contains - // "deadlock detected", so I've used that instead. + // Oracle seems to return different SQLStates and messages each time, + // so I've tried to capture the different error messages (there appear to be fewer different + // error messages than SQL states). // Derby and newer MySQL driver use the new SQLTransactionRollbackException if (dbProduct == null) { determineDatabaseProduct(conn); @@ -921,7 +922,8 @@ protected void detectDeadlock(Connection conn, ((dbProduct == DatabaseProduct.MYSQL || dbProduct == DatabaseProduct.POSTGRES || dbProduct == DatabaseProduct.SQLSERVER) && e.getSQLState().equals("40001")) || (dbProduct == DatabaseProduct.POSTGRES && e.getSQLState().equals("40P01")) || - (dbProduct == DatabaseProduct.ORACLE && (e.getMessage().contains("deadlock detected")))) { + (dbProduct == DatabaseProduct.ORACLE && (e.getMessage().contains("deadlock detected") + || e.getMessage().contains("can't serialize access for this transaction")))) { if (deadlockCnt++ < ALLOWED_REPEATED_DEADLOCKS) { LOG.warn("Deadlock detected in " + caller + ", trying again."); throw new DeadlockException(); diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java index ecbc8c8..bae1391 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/MockPartitionExpressionForMetastore.java @@ -1,6 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.metastore; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import java.util.List; @@ -14,7 +33,9 @@ public String convertExprToFilter(byte[] expr) throws MetaException { } @Override - public boolean filterPartitionsByExpr(List columnNames, byte[] expr, String defaultPartitionName, List partitionNames) throws MetaException { + public boolean filterPartitionsByExpr(List partColumnNames, + List partColumnTypeInfos, byte[] expr, String defaultPartitionName, + List partitionNames) throws MetaException { return false; } } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java index 17a57f0..e48f55c 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.metastore; import org.apache.hadoop.hive.conf.HiveConf; diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java index 446b174..e1f1f49 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java @@ -35,6 +35,7 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import static junit.framework.Assert.*; @@ -1104,7 +1105,7 @@ public void deadlockDetected() throws Exception { conn.commit(); txnHandler.closeDbConn(conn); - final MetaStoreThread.BooleanPointer sawDeadlock = new MetaStoreThread.BooleanPointer(); + final AtomicBoolean sawDeadlock = new AtomicBoolean(); final Connection conn1 = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE); final Connection conn2 = txnHandler.getDbConn(Connection.TRANSACTION_SERIALIZABLE); @@ -1131,7 +1132,7 @@ public void run() { LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of " + "exception is " + e.getClass().getName() + " msg is <" + e .getMessage() + ">"); - sawDeadlock.boolVal = true; + sawDeadlock.set(true); } } conn1.rollback(); @@ -1161,7 +1162,7 @@ public void run() { LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of " + "exception is " + e.getClass().getName() + " msg is <" + e .getMessage() + ">"); - sawDeadlock.boolVal = true; + sawDeadlock.set(true); } } conn2.rollback(); @@ -1175,9 +1176,9 @@ public void run() { t2.start(); t1.join(); t2.join(); - if (sawDeadlock.boolVal) break; + if (sawDeadlock.get()) break; } - assertTrue(sawDeadlock.boolVal); + assertTrue(sawDeadlock.get()); } finally { conn1.rollback(); txnHandler.closeDbConn(conn1); diff --git a/packaging/src/main/assembly/bin.xml b/packaging/src/main/assembly/bin.xml index 860d5e7..8e617d8 100644 --- a/packaging/src/main/assembly/bin.xml +++ b/packaging/src/main/assembly/bin.xml @@ -39,8 +39,9 @@ true true + org.apache.hadoop:* org.apache.hive.hcatalog:* - org.slf4j:* + org.slf4j:* @@ -328,6 +329,11 @@ hive-exec-log4j.properties.template + ${project.parent.basedir}/beeline/src/main/resources/beeline-log4j.properties + conf + beeline-log4j.properties.template + + ${project.parent.basedir}/hcatalog/README.txt hcatalog/share/doc/hcatalog diff --git a/packaging/src/main/assembly/src.xml b/packaging/src/main/assembly/src.xml index dae3150..d331640 100644 --- a/packaging/src/main/assembly/src.xml +++ b/packaging/src/main/assembly/src.xml @@ -48,6 +48,7 @@ NOTICE CHANGELOG RELEASE_NOTES.txt + accumulo-handler/**/* ant/**/* beeline/**/* bin/**/* diff --git a/pom.xml b/pom.xml index 2a2234c..cce2e5e 100644 --- a/pom.xml +++ b/pom.xml @@ -72,6 +72,9 @@ ${project.build.directory}/warehouse pfile:// + + + 1.0b3 3.3.0-release @@ -797,6 +800,7 @@ **/ql/exec/vector/udf/generic/*.java **/TestHiveServer2Concurrency.java **/TestHiveMetaStore.java + ${test.excludes.additional} true false @@ -881,6 +885,11 @@ **/gen-java/** **/testdata/** **/ptest2/*.md + **/test/org/apache/hadoop/hive/hbase/avro/** + **/avro_test.avpr + **/xmlReport.pl + **/*.html + **/sit diff --git a/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java b/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java index 2ba2838..662e058 100644 --- 
a/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java +++ b/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/io/orc/OrcProto.java @@ -16333,14 +16333,32 @@ public Builder clearRowIndexStride() { // repeated uint32 version = 4 [packed = true]; /** * repeated uint32 version = 4 [packed = true]; + * + *
+     * the version of the file format
+     *   [0, 11] = Hive 0.11
+     *   [0, 12] = Hive 0.12
+     * 
*/ java.util.List getVersionList(); /** * repeated uint32 version = 4 [packed = true]; + * + *
+     * the version of the file format
+     *   [0, 11] = Hive 0.11
+     *   [0, 12] = Hive 0.12
+     * 
*/ int getVersionCount(); /** * repeated uint32 version = 4 [packed = true]; + * + *
+     * the version of the file format
+     *   [0, 11] = Hive 0.11
+     *   [0, 12] = Hive 0.12
+     * 
*/ int getVersion(int index); @@ -16354,6 +16372,28 @@ public Builder clearRowIndexStride() { */ long getMetadataLength(); + // optional uint32 writerVersion = 6; + /** + * optional uint32 writerVersion = 6; + * + *
+     * Version of the writer:
+     *   0 (or missing) = original
+     *   1 = HIVE-8732 fixed
+     * 
+ */ + boolean hasWriterVersion(); + /** + * optional uint32 writerVersion = 6; + * + *
+     * Version of the writer:
+     *   0 (or missing) = original
+     *   1 = HIVE-8732 fixed
+     * 
+ */ + int getWriterVersion(); + // optional string magic = 8000; /** * optional string magic = 8000; @@ -16483,8 +16523,13 @@ private PostScript( metadataLength_ = input.readUInt64(); break; } - case 64002: { + case 48: { bitField0_ |= 0x00000010; + writerVersion_ = input.readUInt32(); + break; + } + case 64002: { + bitField0_ |= 0x00000020; magic_ = input.readBytes(); break; } @@ -16584,6 +16629,12 @@ public long getCompressionBlockSize() { private java.util.List version_; /** * repeated uint32 version = 4 [packed = true]; + * + *
+     * the version of the file format
+     *   [0, 11] = Hive 0.11
+     *   [0, 12] = Hive 0.12
+     * 
*/ public java.util.List getVersionList() { @@ -16591,12 +16642,24 @@ public long getCompressionBlockSize() { } /** * repeated uint32 version = 4 [packed = true]; + * + *
+     * the version of the file format
+     *   [0, 11] = Hive 0.11
+     *   [0, 12] = Hive 0.12
+     * 
*/ public int getVersionCount() { return version_.size(); } /** * repeated uint32 version = 4 [packed = true]; + * + *
+     * the version of the file format
+     *   [0, 11] = Hive 0.11
+     *   [0, 12] = Hive 0.12
+     * 
*/ public int getVersion(int index) { return version_.get(index); @@ -16619,6 +16682,34 @@ public long getMetadataLength() { return metadataLength_; } + // optional uint32 writerVersion = 6; + public static final int WRITERVERSION_FIELD_NUMBER = 6; + private int writerVersion_; + /** + * optional uint32 writerVersion = 6; + * + *
+     * Version of the writer:
+     *   0 (or missing) = original
+     *   1 = HIVE-8732 fixed
+     * 
+ */ + public boolean hasWriterVersion() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional uint32 writerVersion = 6; + * + *
+     * Version of the writer:
+     *   0 (or missing) = original
+     *   1 = HIVE-8732 fixed
+     * 
+ */ + public int getWriterVersion() { + return writerVersion_; + } + // optional string magic = 8000; public static final int MAGIC_FIELD_NUMBER = 8000; private java.lang.Object magic_; @@ -16630,7 +16721,7 @@ public long getMetadataLength() { * */ public boolean hasMagic() { - return ((bitField0_ & 0x00000010) == 0x00000010); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional string magic = 8000; @@ -16680,6 +16771,7 @@ private void initFields() { compressionBlockSize_ = 0L; version_ = java.util.Collections.emptyList(); metadataLength_ = 0L; + writerVersion_ = 0; magic_ = ""; } private byte memoizedIsInitialized = -1; @@ -16714,6 +16806,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) output.writeUInt64(5, metadataLength_); } if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeUInt32(6, writerVersion_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeBytes(8000, getMagicBytes()); } getUnknownFields().writeTo(output); @@ -16757,6 +16852,10 @@ public int getSerializedSize() { } if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(6, writerVersion_); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream .computeBytesSize(8000, getMagicBytes()); } size += getUnknownFields().getSerializedSize(); @@ -16889,8 +16988,10 @@ public Builder clear() { bitField0_ = (bitField0_ & ~0x00000008); metadataLength_ = 0L; bitField0_ = (bitField0_ & ~0x00000010); - magic_ = ""; + writerVersion_ = 0; bitField0_ = (bitField0_ & ~0x00000020); + magic_ = ""; + bitField0_ = (bitField0_ & ~0x00000040); return this; } @@ -16943,6 +17044,10 @@ public Builder clone() { if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000010; } + result.writerVersion_ = writerVersion_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { + to_bitField0_ |= 0x00000020; + } result.magic_ = magic_; result.bitField0_ = to_bitField0_; onBuilt(); @@ -16982,8 +17087,11 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.orc.OrcProto.PostScript ot if (other.hasMetadataLength()) { setMetadataLength(other.getMetadataLength()); } + if (other.hasWriterVersion()) { + setWriterVersion(other.getWriterVersion()); + } if (other.hasMagic()) { - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000040; magic_ = other.magic_; onChanged(); } @@ -17126,6 +17234,12 @@ private void ensureVersionIsMutable() { } /** * repeated uint32 version = 4 [packed = true]; + * + *
+       * the version of the file format
+       *   [0, 11] = Hive 0.11
+       *   [0, 12] = Hive 0.12
+       * 
*/ public java.util.List getVersionList() { @@ -17133,18 +17247,36 @@ private void ensureVersionIsMutable() { } /** * repeated uint32 version = 4 [packed = true]; + * + *
+       * the version of the file format
+       *   [0, 11] = Hive 0.11
+       *   [0, 12] = Hive 0.12
+       * 
*/ public int getVersionCount() { return version_.size(); } /** * repeated uint32 version = 4 [packed = true]; + * + *
+       * the version of the file format
+       *   [0, 11] = Hive 0.11
+       *   [0, 12] = Hive 0.12
+       * 
*/ public int getVersion(int index) { return version_.get(index); } /** * repeated uint32 version = 4 [packed = true]; + * + *
+       * the version of the file format
+       *   [0, 11] = Hive 0.11
+       *   [0, 12] = Hive 0.12
+       * 
*/ public Builder setVersion( int index, int value) { @@ -17155,6 +17287,12 @@ public Builder setVersion( } /** * repeated uint32 version = 4 [packed = true]; + * + *
+       * the version of the file format
+       *   [0, 11] = Hive 0.11
+       *   [0, 12] = Hive 0.12
+       * 
*/ public Builder addVersion(int value) { ensureVersionIsMutable(); @@ -17164,6 +17302,12 @@ public Builder addVersion(int value) { } /** * repeated uint32 version = 4 [packed = true]; + * + *
+       * the version of the file format
+       *   [0, 11] = Hive 0.11
+       *   [0, 12] = Hive 0.12
+       * 
*/ public Builder addAllVersion( java.lang.Iterable values) { @@ -17174,6 +17318,12 @@ public Builder addAllVersion( } /** * repeated uint32 version = 4 [packed = true]; + * + *
+       * the version of the file format
+       *   [0, 11] = Hive 0.11
+       *   [0, 12] = Hive 0.12
+       * 
*/ public Builder clearVersion() { version_ = java.util.Collections.emptyList(); @@ -17215,6 +17365,63 @@ public Builder clearMetadataLength() { return this; } + // optional uint32 writerVersion = 6; + private int writerVersion_ ; + /** + * optional uint32 writerVersion = 6; + * + *
+       * Version of the writer:
+       *   0 (or missing) = original
+       *   1 = HIVE-8732 fixed
+       * 
+ */ + public boolean hasWriterVersion() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * optional uint32 writerVersion = 6; + * + *
+       * Version of the writer:
+       *   0 (or missing) = original
+       *   1 = HIVE-8732 fixed
+       * 
+ */ + public int getWriterVersion() { + return writerVersion_; + } + /** + * optional uint32 writerVersion = 6; + * + *
+       * Version of the writer:
+       *   0 (or missing) = original
+       *   1 = HIVE-8732 fixed
+       * 
+ */ + public Builder setWriterVersion(int value) { + bitField0_ |= 0x00000020; + writerVersion_ = value; + onChanged(); + return this; + } + /** + * optional uint32 writerVersion = 6; + * + *
+       * Version of the writer:
+       *   0 (or missing) = original
+       *   1 = HIVE-8732 fixed
+       * 
+ */ + public Builder clearWriterVersion() { + bitField0_ = (bitField0_ & ~0x00000020); + writerVersion_ = 0; + onChanged(); + return this; + } + // optional string magic = 8000; private java.lang.Object magic_ = ""; /** @@ -17225,7 +17432,7 @@ public Builder clearMetadataLength() { * */ public boolean hasMagic() { - return ((bitField0_ & 0x00000020) == 0x00000020); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional string magic = 8000; @@ -17277,7 +17484,7 @@ public Builder setMagic( if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000040; magic_ = value; onChanged(); return this; @@ -17290,7 +17497,7 @@ public Builder setMagic( * */ public Builder clearMagic() { - bitField0_ = (bitField0_ & ~0x00000020); + bitField0_ = (bitField0_ & ~0x00000040); magic_ = getDefaultInstance().getMagic(); onChanged(); return this; @@ -17307,7 +17514,7 @@ public Builder setMagicBytes( if (value == null) { throw new NullPointerException(); } - bitField0_ |= 0x00000020; + bitField0_ |= 0x00000040; magic_ = value; onChanged(); return this; @@ -17513,13 +17720,14 @@ public Builder setMagicBytes( "em\022\024\n\014numberOfRows\030\006 \001(\004\022F\n\nstatistics\030\007" + " \003(\01322.org.apache.hadoop.hive.ql.io.orc." + "ColumnStatistics\022\026\n\016rowIndexStride\030\010 \001(\r" + - "\"\305\001\n\nPostScript\022\024\n\014footerLength\030\001 \001(\004\022F\n" + + "\"\334\001\n\nPostScript\022\024\n\014footerLength\030\001 \001(\004\022F\n" + "\013compression\030\002 \001(\01621.org.apache.hadoop.h" + "ive.ql.io.orc.CompressionKind\022\034\n\024compres" + "sionBlockSize\030\003 \001(\004\022\023\n\007version\030\004 \003(\rB\002\020\001" + - "\022\026\n\016metadataLength\030\005 \001(\004\022\016\n\005magic\030\300> \001(\t", - "*:\n\017CompressionKind\022\010\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022" + - "\n\n\006SNAPPY\020\002\022\007\n\003LZO\020\003" + "\022\026\n\016metadataLength\030\005 \001(\004\022\025\n\rwriterVersio", + "n\030\006 \001(\r\022\016\n\005magic\030\300> \001(\t*:\n\017CompressionKi" + + "nd\022\010\n\004NONE\020\000\022\010\n\004ZLIB\020\001\022\n\n\006SNAPPY\020\002\022\007\n\003LZ" + + "O\020\003" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -17651,7 +17859,7 @@ public Builder setMagicBytes( internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_org_apache_hadoop_hive_ql_io_orc_PostScript_descriptor, - new java.lang.String[] { "FooterLength", "Compression", "CompressionBlockSize", "Version", "MetadataLength", "Magic", }); + new java.lang.String[] { "FooterLength", "Compression", "CompressionBlockSize", "Version", "MetadataLength", "WriterVersion", "Magic", }); return null; } }; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java index 107eb9d..6c80a14 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java @@ -212,14 +212,13 @@ public void closeOp(boolean abort) throws HiveException { // move any incompatible files to final path if (!incompatFileSet.isEmpty()) { for (Path incompatFile : incompatFileSet) { - String fileName = incompatFile.getName(); - Path 
destFile = new Path(finalPath.getParent(), fileName); + Path destDir = finalPath.getParent(); try { - Utilities.renameOrMoveFiles(fs, incompatFile, destFile); + Utilities.renameOrMoveFiles(fs, incompatFile, destDir); LOG.info("Moved incompatible file " + incompatFile + " to " + - destFile); + destDir); } catch (HiveException e) { - LOG.error("Unable to move " + incompatFile + " to " + destFile); + LOG.error("Unable to move " + incompatFile + " to " + destDir); throw new IOException(e); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java index 6a06668..1da8933 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java @@ -438,8 +438,11 @@ private int compareKeys(List k1, List k2) { WritableComparable key_1 = (WritableComparable) k1.get(i); WritableComparable key_2 = (WritableComparable) k2.get(i); if (key_1 == null && key_2 == null) { - return nullsafes != null && nullsafes[i] ? 0 : -1; // just return k1 is - // smaller than k2 + if (nullsafes != null && nullsafes[i]) { + continue; + } else { + return -1; + } } else if (key_1 == null) { return -1; } else if (key_2 == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 1655f3d..e54b80e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -741,8 +741,7 @@ private void writeListToFileAfterSort(List entries, String resFile) thro Collections.sort(entries); StringBuilder sb = new StringBuilder(); for(String entry : entries){ - sb.append(entry); - sb.append((char)terminator); + appendNonNull(sb, entry, true); } writeToFile(sb.toString(), resFile); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 4a8ca47..4fb30bc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -1135,7 +1135,9 @@ private void publishStats() throws HiveException { String postfix=null; if (taskIndependent) { // key = "database.table/SP/DP/"LB/ - prefix = conf.getTableInfo().getTableName(); + // Hive stores lowercase table names in the metastore, and Counters is character case sensitive, so we + // use the lowercase table name as the prefix here, as StatsTask gets the table name from the metastore to fetch the counter.
+ prefix = conf.getTableInfo().getTableName().toLowerCase(); } else { // key = "prefix/SP/DP/"LB/taskID/ prefix = conf.getStatsAggPrefix(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java index c4bdaa0..7602740 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableLoader.java @@ -33,6 +33,6 @@ void init(ExecMapperContext context, Configuration hconf, MapJoinOperator joinOp); - void load(MapJoinTableContainer[] mapJoinTables, MapJoinTableContainerSerDe[] mapJoinTableSerdes) - throws HiveException; + void load(MapJoinTableContainer[] mapJoinTables, + MapJoinTableContainerSerDe[] mapJoinTableSerdes, long memUsage) throws HiveException; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java index 4a82845..ff42591 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java @@ -26,6 +26,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.HashTableLoaderFactory; +import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionHandler; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinKey; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectSerDeContext; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinRowContainer; @@ -187,7 +188,9 @@ private void loadHashTable() throws HiveException { } perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.LOAD_HASHTABLE); loader.init(getExecContext(), hconf, this); - loader.load(mapJoinTables, mapJoinTableSerdes); + long memUsage = (long)(MapJoinMemoryExhaustionHandler.getMaxHeapSize() + * conf.getHashTableMemoryUsage()); + loader.load(mapJoinTables, mapJoinTableSerdes, memUsage); if (!conf.isBucketMapJoin()) { /* * The issue with caching in case of bucket map join is that different tasks diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFTopNHash.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFTopNHash.java index de7d71b..f93b420 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFTopNHash.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFTopNHash.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.ql.exec; import java.io.IOException; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java index 547c2bc..c5d8aea 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mapjoin/MapJoinMemoryExhaustionHandler.java @@ -55,22 +55,30 @@ public MapJoinMemoryExhaustionHandler(LogHelper console, double maxMemoryUsage) this.console = console; this.maxMemoryUsage = maxMemoryUsage; this.memoryMXBean = ManagementFactory.getMemoryMXBean(); - long maxHeapSize = memoryMXBean.getHeapMemoryUsage().getMax(); + this.maxHeapSize = getMaxHeapSize(memoryMXBean); + percentageNumberFormat = NumberFormat.getInstance(); + percentageNumberFormat.setMinimumFractionDigits(2); + LOG.info("JVM Max Heap Size: " + this.maxHeapSize); + } + + public static long getMaxHeapSize() { + return getMaxHeapSize(ManagementFactory.getMemoryMXBean()); + } + + private static long getMaxHeapSize(MemoryMXBean bean) { + long maxHeapSize = bean.getHeapMemoryUsage().getMax(); /* * According to the javadoc, getMax() can return -1. In this case * default to 200MB. This will probably never actually happen. */ if(maxHeapSize == -1) { - this.maxHeapSize = 200L * 1024L * 1024L; LOG.warn("MemoryMXBean.getHeapMemoryUsage().getMax() returned -1, " + "defaulting maxHeapSize to 200MB"); - } else { - this.maxHeapSize = maxHeapSize; + return 200L * 1024L * 1024L; } - percentageNumberFormat = NumberFormat.getInstance(); - percentageNumberFormat.setMinimumFractionDigits(2); - LOG.info("JVM Max Heap Size: " + this.maxHeapSize); + return maxHeapSize; } + /** * Throws MapJoinMemoryExhaustionException when the JVM has consumed the * configured percentage of memory. The arguments are used simply for the error diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java index 1ba1518..9581b72 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HashTableLoader.java @@ -72,7 +72,7 @@ public void init(ExecMapperContext context, Configuration hconf, MapJoinOperator @Override public void load( MapJoinTableContainer[] mapJoinTables, - MapJoinTableContainerSerDe[] mapJoinTableSerdes) throws HiveException { + MapJoinTableContainerSerDe[] mapJoinTableSerdes, long memUsage) throws HiveException { String currentInputPath = context.getCurrentInputPath().toString(); LOG.info("******* Load from HashTable for input file: " + currentInputPath); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java index 8b674ea..8d3e3cc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java @@ -149,13 +149,27 @@ /** We have 39 bits to store list pointer from the first record; this is size limit */ final static long MAX_WB_SIZE = ((long)1) << 38; + /** 8 Gb of refs is the max capacity if memory limit is not specified. If someone has 100s of + * Gbs of memory (this might happen pretty soon) we'd need to string together arrays anyway. 
*/ + private final static int DEFAULT_MAX_CAPACITY = 1024 * 1024 * 1024; - public BytesBytesMultiHashMap(int initialCapacity, float loadFactor, int wbSize) { + public BytesBytesMultiHashMap(int initialCapacity, + float loadFactor, int wbSize, long memUsage, int defaultCapacity) { if (loadFactor < 0 || loadFactor > 1) { throw new AssertionError("Load factor must be between (0, 1]."); } + assert initialCapacity > 0; initialCapacity = (Long.bitCount(initialCapacity) == 1) ? initialCapacity : nextHighestPowerOfTwo(initialCapacity); + // 8 bytes per long in the refs, assume data will be empty. This is just a sanity check. + int maxCapacity = (memUsage <= 0) ? DEFAULT_MAX_CAPACITY + : (int)Math.min((long)DEFAULT_MAX_CAPACITY, memUsage / 8); + if (maxCapacity < initialCapacity || initialCapacity <= 0) { + // Either initialCapacity is too large, or nextHighestPowerOfTwo overflows + initialCapacity = (Long.bitCount(maxCapacity) == 1) + ? maxCapacity : nextLowestPowerOfTwo(maxCapacity); + } + validateCapacity(initialCapacity); startingHashBitCount = 63 - Long.numberOfLeadingZeros(initialCapacity); this.loadFactor = loadFactor; @@ -164,6 +178,11 @@ public BytesBytesMultiHashMap(int initialCapacity, float loadFactor, int wbSize) resizeThreshold = (int)(initialCapacity * this.loadFactor); } + @VisibleForTesting + BytesBytesMultiHashMap(int initialCapacity, float loadFactor, int wbSize) { + this(initialCapacity, loadFactor, wbSize, -1, 100000); + } + /** The source of keys and values to put into hashtable; avoids byte copying. */ public static interface KvSource { /** Write key into output. */ @@ -644,6 +663,10 @@ private static int nextHighestPowerOfTwo(int v) { return Integer.highestOneBit(v) << 1; } + private static int nextLowestPowerOfTwo(int v) { + return Integer.highestOneBit(v); + } + @VisibleForTesting int getCapacity() { return refs.length; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java index e84e65e..28f6c63 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.ql.exec.persistence; @@ -60,17 +78,20 @@ private final List EMPTY_LIST = new ArrayList(0); public MapJoinBytesTableContainer(Configuration hconf, - MapJoinObjectSerDeContext valCtx, long keyCount) throws SerDeException { + MapJoinObjectSerDeContext valCtx, long keyCount, long memUsage) throws SerDeException { this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT), HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD), HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE), valCtx, keyCount); + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE), + valCtx, keyCount, memUsage); } private MapJoinBytesTableContainer(float keyCountAdj, int threshold, float loadFactor, - int wbSize, MapJoinObjectSerDeContext valCtx, long keyCount) throws SerDeException { - threshold = HashMapWrapper.calculateTableSize(keyCountAdj, threshold, loadFactor, keyCount); - hashMap = new BytesBytesMultiHashMap(threshold, loadFactor, wbSize); + int wbSize, MapJoinObjectSerDeContext valCtx, long keyCount, long memUsage) + throws SerDeException { + int newThreshold = HashMapWrapper.calculateTableSize( + keyCountAdj, threshold, loadFactor, keyCount); + hashMap = new BytesBytesMultiHashMap(newThreshold, loadFactor, wbSize, memUsage, threshold); } private LazyBinaryStructObjectInspector createInternalOi( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java index 95dad90..2895d80 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/spark/HashTableLoader.java @@ -65,7 +65,7 @@ public void init(ExecMapperContext context, Configuration hconf, MapJoinOperator @Override public void load( MapJoinTableContainer[] mapJoinTables, - MapJoinTableContainerSerDe[] mapJoinTableSerdes) throws HiveException { + MapJoinTableContainerSerDe[] mapJoinTableSerdes, long memUsage) throws HiveException { String currentInputPath = context.getCurrentInputPath().toString(); LOG.info("******* Load from HashTable for input file: " + currentInputPath); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java index 7a9f481..105a3db 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.MapJoinOperator; import org.apache.hadoop.hive.ql.exec.MapredContext; +import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionHandler; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.exec.persistence.HashMapWrapper; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinBytesTableContainer; @@ -69,7 +70,7 @@ public void init(ExecMapperContext context, Configuration hconf, MapJoinOperator @Override public void load( MapJoinTableContainer[] mapJoinTables, - MapJoinTableContainerSerDe[] mapJoinTableSerdes) throws HiveException { + MapJoinTableContainerSerDe[] mapJoinTableSerdes, long memUsage) throws HiveException { TezContext tezContext = (TezContext) MapredContext.get(); Map parentToInput = desc.getParentToInput(); @@ -106,7 +107,7 @@ public void load( Long keyCountObj = 
parentKeyCounts.get(pos); long keyCount = (keyCountObj == null) ? -1 : keyCountObj.longValue(); MapJoinTableContainer tableContainer = useOptimizedTables - ? new MapJoinBytesTableContainer(hconf, valCtx, keyCount) + ? new MapJoinBytesTableContainer(hconf, valCtx, keyCount, memUsage) : new HashMapWrapper(hconf, keyCount); while (kvReader.next()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java index 7cec650..dea3460 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezJobMonitor.java @@ -45,6 +45,8 @@ import org.apache.tez.dag.api.client.VertexStatus; import org.fusesource.jansi.Ansi; +import com.google.common.base.Preconditions; + import java.io.IOException; import java.io.PrintStream; import java.text.DecimalFormat; @@ -132,6 +134,11 @@ public void run() { }); } + public static void initShutdownHook() { + Preconditions.checkNotNull(shutdownList, + "Shutdown hook was not properly initialized"); + } + public TezJobMonitor() { console = SessionState.getConsole(); secondsFormat = new DecimalFormat("#0.00"); @@ -290,6 +297,7 @@ public int monitorExecution(final DAGClient dagClient, HiveTxnManager txnMgr, Hi break; case INITING: console.printInfo("Status: Initializing"); + startTime = System.currentTimeMillis(); break; case RUNNING: if (!running) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java index bb99357..65a0090 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java @@ -187,6 +187,7 @@ public void open(HiveConf conf, String[] additionalFiles) LOG.info("Opening new Tez Session (id: " + sessionId + ", scratch dir: " + tezScratchDir + ")"); + TezJobMonitor.initShutdownHook(); session.start(); if (HiveConf.getBoolVar(conf, ConfVars.HIVE_PREWARM_ENABLED)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java index 97684a8..fc83a86 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Set; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.Context; @@ -170,7 +171,8 @@ public int execute(DriverContext driverContext) { counters = client.getDAGStatus(statusGetOpts).getDAGCounters(); TezSessionPoolManager.getInstance().returnSession(session); - if (LOG.isInfoEnabled() && counters != null) { + if (LOG.isInfoEnabled() && counters != null + && conf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY)) { for (CounterGroup group: counters) { LOG.info(group.getDisplayName() +":"); for (TezCounter counter: group) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java index cdc929a..4db825b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/ATSHook.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; import 
org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.exec.ExplainTask; import org.apache.hadoop.hive.ql.exec.Task; @@ -89,6 +90,8 @@ public void run() { @Override public void run(final HookContext hookContext) throws Exception { final long currentTime = System.currentTimeMillis(); + final HiveConf conf = new HiveConf(hookContext.getConf()); + executor.submit(new Runnable() { @Override public void run() { @@ -110,19 +113,19 @@ public void run() { switch(hookContext.getHookType()) { case PRE_EXEC_HOOK: ExplainTask explain = new ExplainTask(); - explain.initialize(hookContext.getConf(), plan, null); + explain.initialize(conf, plan, null); String query = plan.getQueryStr(); List> rootTasks = plan.getRootTasks(); JSONObject explainPlan = explain.getJSONPlan(null, null, rootTasks, plan.getFetchTask(), true, false, false); - fireAndForget(hookContext.getConf(), createPreHookEvent(queryId, query, + fireAndForget(conf, createPreHookEvent(queryId, query, explainPlan, queryStartTime, user, numMrJobs, numTezJobs)); break; case POST_EXEC_HOOK: - fireAndForget(hookContext.getConf(), createPostHookEvent(queryId, currentTime, user, true)); + fireAndForget(conf, createPostHookEvent(queryId, currentTime, user, true)); break; case ON_FAILURE_HOOK: - fireAndForget(hookContext.getConf(), createPostHookEvent(queryId, currentTime, user, false)); + fireAndForget(conf, createPostHookEvent(queryId, currentTime, user, false)); break; default: //ignore diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java index 65b5ca8..3235b0e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ColumnStatisticsImpl.java @@ -394,7 +394,8 @@ void merge(ColumnStatisticsImpl other) { } else if (str.minimum != null) { if (minimum.compareTo(str.minimum) > 0) { minimum = new Text(str.getMinimum()); - } else if (maximum.compareTo(str.maximum) < 0) { + } + if (maximum.compareTo(str.maximum) < 0) { maximum = new Text(str.getMaximum()); } } @@ -563,7 +564,8 @@ void merge(ColumnStatisticsImpl other) { } else if (dec.minimum != null) { if (minimum.compareTo(dec.minimum) > 0) { minimum = dec.minimum; - } else if (maximum.compareTo(dec.maximum) < 0) { + } + if (maximum.compareTo(dec.maximum) < 0) { maximum = dec.maximum; } if (sum == null || dec.sum == null) { @@ -671,7 +673,8 @@ void merge(ColumnStatisticsImpl other) { } else if (dateStats.minimum != null) { if (minimum > dateStats.minimum) { minimum = dateStats.minimum; - } else if (maximum < dateStats.maximum) { + } + if (maximum < dateStats.maximum) { maximum = dateStats.maximum; } } @@ -767,7 +770,8 @@ void merge(ColumnStatisticsImpl other) { } else if (timestampStats.minimum != null) { if (minimum > timestampStats.minimum) { minimum = timestampStats.minimum; - } else if (maximum < timestampStats.maximum) { + } + if (maximum < timestampStats.maximum) { maximum = timestampStats.maximum; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java index 6b230e9..845e2e6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java @@ -65,6 +65,8 @@ public static void main(String[] args) throws Exception { System.out.println("Structure for " + filename); Path path = new Path(filename); Reader reader = OrcFile.createReader(path, 
OrcFile.readerOptions(conf)); + System.out.println("File Version: " + reader.getFileVersion().getName() + + " with " + reader.getWriterVersion()); RecordReaderImpl rows = (RecordReaderImpl) reader.rows(); System.out.println("Rows: " + reader.getNumberOfRows()); System.out.println("Compression: " + reader.getCompression()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java index 39326c9..b46937c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java @@ -97,6 +97,26 @@ public int getMinor() { } } + /** + * Records the version of the writer in terms of which bugs have been fixed. + * For writer bugs where old readers still read the new data + * correctly, bump this version instead of the file format Version. + */ + public static enum WriterVersion { + ORIGINAL(0), + HIVE_8732(1); // corrupted stripe/file maximum column statistics + + private final int id; + + public int getId() { + return id; + } + + private WriterVersion(int id) { + this.id = id; + } + } + public static enum EncodingStrategy { SPEED, COMPRESSION; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index 9340ce6..3ed6be2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -630,6 +630,7 @@ private FileInfo verifyCachedFileInfo(FileStatus file) { private final boolean isOriginal; private final List deltas; private final boolean hasBase; + private OrcFile.WriterVersion writerVersion; SplitGenerator(Context context, FileSystem fs, FileStatus file, FileInfo fileInfo, @@ -775,7 +776,9 @@ public void run() { Reader.Options options = new Reader.Options(); setIncludedColumns(options, types, context.conf, isOriginal); setSearchArgument(options, types, context.conf, isOriginal); - if (options.getSearchArgument() != null) { + // only do split pruning if HIVE-8732 has been fixed in the writer + if (options.getSearchArgument() != null && + writerVersion != OrcFile.WriterVersion.ORIGINAL) { SearchArgument sarg = options.getSearchArgument(); List sargLeaves = sarg.getLeaves(); List stripeStats = metadata.getStripeStatistics(); @@ -866,6 +869,7 @@ private void populateAndCacheStripeDetails() { fileMetaInfo = fileInfo.fileMetaInfo; metadata = fileInfo.metadata; types = fileInfo.types; + writerVersion = fileInfo.writerVersion; // For multiple runs, in case sendSplitsInFooter changes if (fileMetaInfo == null && context.footerInSplits) { orcReader = OrcFile.createReader(file.getPath(), @@ -873,6 +877,7 @@ private void populateAndCacheStripeDetails() { fileInfo.fileMetaInfo = ((ReaderImpl) orcReader).getFileMetaInfo(); fileInfo.metadata = orcReader.getMetadata(); fileInfo.types = orcReader.getTypes(); + fileInfo.writerVersion = orcReader.getWriterVersion(); } } else { orcReader = OrcFile.createReader(file.getPath(), @@ -880,13 +885,14 @@ private void populateAndCacheStripeDetails() { stripes = orcReader.getStripes(); metadata = orcReader.getMetadata(); types = orcReader.getTypes(); + writerVersion = orcReader.getWriterVersion(); fileMetaInfo = context.footerInSplits ? ((ReaderImpl) orcReader).getFileMetaInfo() : null; if (context.cacheStripeDetails) { // Populate into cache.
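The ORC hunks in this area work together: the ColumnStatisticsImpl.merge hunks earlier in this patch turn an `else if` into independent `if` checks so a merge can update both the minimum and the maximum, and OrcFile.WriterVersion records in the file postscript whether the writer already carries that HIVE-8732 fix; OrcInputFormat then only trusts stripe statistics for split pruning when the recorded writer version is not ORIGINAL. The snippet below is a self-contained sketch of that gating pattern, not Hive code: the enum values and getId() mirror the patch, while fromId and canPruneWithStripeStats are hypothetical helper names used purely for illustration.

```java
// Illustrative sketch only: mirrors the WriterVersion gating added in this patch.
// "fromId" and "canPruneWithStripeStats" are hypothetical names, not Hive API.
public class WriterVersionGateSketch {

  /** Writer fix level recorded in the ORC postscript (ids mirror the patch). */
  enum WriterVersion {
    ORIGINAL(0),
    HIVE_8732(1); // corrupted stripe/file maximum column statistics fixed

    private final int id;
    WriterVersion(int id) { this.id = id; }
    int getId() { return id; }

    /** Unknown or missing ids fall back to ORIGINAL, the most conservative choice. */
    static WriterVersion fromId(int id) {
      for (WriterVersion v : values()) {
        if (v.getId() == id) {
          return v;
        }
      }
      return ORIGINAL;
    }
  }

  /** Stripe-level pruning is only safe if the writer already had the HIVE-8732 fix. */
  static boolean canPruneWithStripeStats(WriterVersion writerVersion, boolean hasSearchArgument) {
    return hasSearchArgument && writerVersion != WriterVersion.ORIGINAL;
  }

  public static void main(String[] args) {
    System.out.println(canPruneWithStripeStats(WriterVersion.fromId(1), true)); // true
    System.out.println(canPruneWithStripeStats(WriterVersion.fromId(0), true)); // false
    System.out.println(canPruneWithStripeStats(WriterVersion.fromId(7), true)); // false (unknown id)
  }
}
```

Falling back to ORIGINAL for unknown ids keeps readers conservative: they skip pruning rather than risk dropping rows based on maximum statistics that may be corrupted.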
Context.footerCache.put(file.getPath(), new FileInfo(file.getModificationTime(), file.getLen(), stripes, - metadata, types, fileMetaInfo)); + metadata, types, fileMetaInfo, writerVersion)); } } } catch (Throwable th) { @@ -981,18 +987,21 @@ private boolean isStripeSatisfyPredicate(StripeStatistics stripeStatistics, ReaderImpl.FileMetaInfo fileMetaInfo; Metadata metadata; List types; + private OrcFile.WriterVersion writerVersion; FileInfo(long modificationTime, long size, List stripeInfos, Metadata metadata, List types, - ReaderImpl.FileMetaInfo fileMetaInfo) { + ReaderImpl.FileMetaInfo fileMetaInfo, + OrcFile.WriterVersion writerVersion) { this.modificationTime = modificationTime; this.size = size; this.stripeInfos = stripeInfos; this.fileMetaInfo = fileMetaInfo; this.metadata = metadata; this.types = types; + this.writerVersion = writerVersion; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java index edf5e8e..da23544 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java @@ -38,6 +38,7 @@ private boolean isOriginal; private boolean hasBase; private final List deltas = new ArrayList(); + private OrcFile.WriterVersion writerVersion; protected OrcNewSplit(){ //The FileSplit() constructor in hadoop 0.20 and 1.x is package private so can't use it. @@ -83,6 +84,7 @@ public void write(DataOutput out) throws IOException { WritableUtils.writeVInt(out, footerBuff.limit() - footerBuff.position()); out.write(footerBuff.array(), footerBuff.position(), footerBuff.limit() - footerBuff.position()); + WritableUtils.writeVInt(out, fileMetaInfo.writerVersion.getId()); } } @@ -111,9 +113,11 @@ public void readFields(DataInput in) throws IOException { int footerBuffSize = WritableUtils.readVInt(in); ByteBuffer footerBuff = ByteBuffer.allocate(footerBuffSize); in.readFully(footerBuff.array(), 0, footerBuffSize); + OrcFile.WriterVersion writerVersion = + ReaderImpl.getWriterVersion(WritableUtils.readVInt(in)); fileMetaInfo = new ReaderImpl.FileMetaInfo(compressionType, bufferSize, - metadataSize, footerBuff); + metadataSize, footerBuff, writerVersion); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java index 48160c1..84192d5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java @@ -42,6 +42,7 @@ private boolean isOriginal; private boolean hasBase; private final List deltas = new ArrayList(); + private OrcFile.WriterVersion writerVersion; static final int BASE_FLAG = 4; static final int ORIGINAL_FLAG = 2; @@ -92,6 +93,7 @@ public void write(DataOutput out) throws IOException { WritableUtils.writeVInt(out, footerBuff.limit() - footerBuff.position()); out.write(footerBuff.array(), footerBuff.position(), footerBuff.limit() - footerBuff.position()); + WritableUtils.writeVInt(out, fileMetaInfo.writerVersion.getId()); } } @@ -120,9 +122,11 @@ public void readFields(DataInput in) throws IOException { int footerBuffSize = WritableUtils.readVInt(in); ByteBuffer footerBuff = ByteBuffer.allocate(footerBuffSize); in.readFully(footerBuff.array(), 0, footerBuffSize); + OrcFile.WriterVersion writerVersion = + ReaderImpl.getWriterVersion(WritableUtils.readVInt(in)); fileMetaInfo = new ReaderImpl.FileMetaInfo(compressionType, bufferSize, - metadataSize, footerBuff); + metadataSize, 
footerBuff, writerVersion); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java index df5afd1..f85c21b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/Reader.java @@ -129,6 +129,16 @@ List getTypes(); /** + * Get the file format version. + */ + OrcFile.Version getFileVersion(); + + /** + * Get the version of the writer of this file. + */ + OrcFile.WriterVersion getWriterVersion(); + + /** * Options for creating a RecordReader. */ public static class Options { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java index 13f79df..03f8085 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ReaderImpl.java @@ -62,6 +62,7 @@ private long deserializedSize = -1; private final Configuration conf; private final List versionList; + private final OrcFile.WriterVersion writerVersion; //serialized footer - Keeping this around for use by getFileMetaInfo() // will help avoid cpu cycles spend in deserializing at cost of increased @@ -182,6 +183,22 @@ public long getContentLength() { } @Override + public OrcFile.Version getFileVersion() { + for (OrcFile.Version version: OrcFile.Version.values()) { + if (version.getMajor() == versionList.get(0) && + version.getMinor() == versionList.get(1)) { + return version; + } + } + return OrcFile.Version.V_0_11; + } + + @Override + public OrcFile.WriterVersion getWriterVersion() { + return writerVersion; + } + + @Override public int getRowIndexStride() { return footer.getRowIndexStride(); } @@ -309,8 +326,22 @@ static void checkOrcVersion(Log log, Path path, List version) { this.footer = rInfo.footer; this.inspector = rInfo.inspector; this.versionList = footerMetaData.versionList; + this.writerVersion = footerMetaData.writerVersion; } + /** + * Get the WriterVersion based on the ORC file postscript. 
+ * @param writerVersion the integer writer version + * @return + */ + static OrcFile.WriterVersion getWriterVersion(int writerVersion) { + for(OrcFile.WriterVersion version: OrcFile.WriterVersion.values()) { + if (version.getId() == writerVersion) { + return version; + } + } + return OrcFile.WriterVersion.ORIGINAL; + } private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, Path path, @@ -346,6 +377,12 @@ private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, int footerSize = (int) ps.getFooterLength(); int metadataSize = (int) ps.getMetadataLength(); + OrcFile.WriterVersion writerVersion; + if (ps.hasWriterVersion()) { + writerVersion = getWriterVersion(ps.getWriterVersion()); + } else { + writerVersion = OrcFile.WriterVersion.ORIGINAL; + } //check compression codec switch (ps.getCompression()) { @@ -391,7 +428,8 @@ private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, (int) ps.getCompressionBlockSize(), (int) ps.getMetadataLength(), buffer, - ps.getVersionList() + ps.getVersionList(), + writerVersion ); } @@ -451,25 +489,29 @@ private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, final int metadataSize; final ByteBuffer footerBuffer; final List versionList; + final OrcFile.WriterVersion writerVersion; FileMetaInfo(String compressionType, int bufferSize, int metadataSize, - ByteBuffer footerBuffer) { - this(compressionType, bufferSize, metadataSize, footerBuffer, null); + ByteBuffer footerBuffer, OrcFile.WriterVersion writerVersion) { + this(compressionType, bufferSize, metadataSize, footerBuffer, null, + writerVersion); } FileMetaInfo(String compressionType, int bufferSize, int metadataSize, - ByteBuffer footerBuffer, List versionList){ + ByteBuffer footerBuffer, List versionList, + OrcFile.WriterVersion writerVersion){ this.compressionType = compressionType; this.bufferSize = bufferSize; this.metadataSize = metadataSize; this.footerBuffer = footerBuffer; this.versionList = versionList; + this.writerVersion = writerVersion; } } public FileMetaInfo getFileMetaInfo(){ return new FileMetaInfo(compressionKind.toString(), bufferSize, - metadataSize, footerByteBuffer, versionList); + metadataSize, footerByteBuffer, versionList, writerVersion); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java index f8fa316..a6a0ec1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java @@ -2364,20 +2364,21 @@ static TruthValue evaluatePredicate(OrcProto.ColumnStatistics index, PredicateLeaf predicate) { ColumnStatistics cs = ColumnStatisticsImpl.deserialize(index); Object minValue = getMin(cs); + Object maxValue = getMax(cs); + return evaluatePredicateRange(predicate, minValue, maxValue); + } + + static TruthValue evaluatePredicateRange(PredicateLeaf predicate, Object min, + Object max) { // if we didn't have any values, everything must have been null - if (minValue == null) { + if (min == null) { if (predicate.getOperator() == PredicateLeaf.Operator.IS_NULL) { return TruthValue.YES; } else { return TruthValue.NULL; } } - Object maxValue = getMax(cs); - return evaluatePredicateRange(predicate, minValue, maxValue); - } - static TruthValue evaluatePredicateRange(PredicateLeaf predicate, Object min, - Object max) { Location loc; try { // Predicate object and stats object can be one of the following base types diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TimestampColumnStatistics.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TimestampColumnStatistics.java index 9073f4f..ef42f50 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TimestampColumnStatistics.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/TimestampColumnStatistics.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.io.orc; import java.sql.Timestamp; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java index 620d4d4..9e69de6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/WriterImpl.java @@ -2230,7 +2230,8 @@ private int writePostScript(int footerLength, int metadataLength) throws IOExcep .setMetadataLength(metadataLength) .setMagic(OrcFile.MAGIC) .addVersion(version.getMajor()) - .addVersion(version.getMinor()); + .addVersion(version.getMinor()) + .setWriterVersion(OrcFile.WriterVersion.HIVE_8732.getId()); if (compress != CompressionKind.NONE) { builder.setCompressionBlockSize(bufferSize); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java index d63fc72..c57dd99 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java @@ -63,6 +63,8 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; +import parquet.hadoop.ParquetOutputFormat; +import parquet.hadoop.ParquetWriter; import parquet.io.api.Binary; /** @@ -70,13 +72,18 @@ * A ParquetHiveSerDe for Hive (with the deprecated package mapred) * */ -@SerDeSpec(schemaProps = {serdeConstants.LIST_COLUMNS, serdeConstants.LIST_COLUMN_TYPES}) +@SerDeSpec(schemaProps = {serdeConstants.LIST_COLUMNS, serdeConstants.LIST_COLUMN_TYPES, + ParquetOutputFormat.COMPRESSION}) public class ParquetHiveSerDe extends AbstractSerDe { public static final Text MAP_KEY = new Text("key"); public static final Text MAP_VALUE = new Text("value"); public static final Text MAP = new Text("map"); public static final Text ARRAY = new Text("bag"); + // default compression type for parquet output format + private static final String DEFAULTCOMPRESSION = + ParquetWriter.DEFAULT_COMPRESSION_CODEC_NAME.name(); + // Map precision to the number bytes needed for binary conversion. 
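The Parquet hunks around here (ParquetHiveSerDe and ParquetRecordWriterWrapper) stop special-casing compression in the writer constructor and instead read parquet.compression, parquet.block.size and parquet.enable.dictionary from the table properties and copy them into the task configuration before the real ParquetOutputFormat record writer is created. Below is a minimal, dependency-free sketch of that copy step; a plain Map stands in for the Hadoop Configuration, and the literal key strings are assumed values of the ParquetOutputFormat constants referenced in the patch.

```java
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

// Sketch of initializeSerProperties-style handling: copy selected table properties
// into the job configuration so the Parquet output format picks them up.
// The Map stands in for org.apache.hadoop.conf.Configuration; key strings are assumptions.
public class ParquetTablePropsSketch {

  static final String BLOCK_SIZE = "parquet.block.size";
  static final String ENABLE_DICTIONARY = "parquet.enable.dictionary";
  static final String COMPRESSION = "parquet.compression";

  static void copyParquetProps(Properties tableProperties, Map<String, String> conf) {
    String blockSize = tableProperties.getProperty(BLOCK_SIZE);
    if (blockSize != null && !blockSize.isEmpty()) {
      // validate that the override is an int before handing it to the writer
      conf.put(BLOCK_SIZE, Integer.toString(Integer.parseInt(blockSize)));
    }
    String enableDictionary = tableProperties.getProperty(ENABLE_DICTIONARY);
    if (enableDictionary != null && !enableDictionary.isEmpty()) {
      conf.put(ENABLE_DICTIONARY, Boolean.toString(Boolean.parseBoolean(enableDictionary)));
    }
    String compression = tableProperties.getProperty(COMPRESSION);
    if (compression != null && !compression.isEmpty()) {
      // the real code resolves this through CompressionCodecName.fromConf(...)
      conf.put(COMPRESSION, compression.toUpperCase());
    }
  }

  public static void main(String[] args) {
    Properties tblProps = new Properties();
    tblProps.setProperty(COMPRESSION, "snappy");   // e.g. TBLPROPERTIES ('parquet.compression'='SNAPPY')
    tblProps.setProperty(BLOCK_SIZE, "134217728"); // 128 MB row groups

    Map<String, String> conf = new LinkedHashMap<>();
    copyParquetProps(tblProps, conf);
    System.out.println(conf); // {parquet.block.size=134217728, parquet.compression=SNAPPY}
  }
}
```

Routing everything through the configuration means a TBLPROPERTIES override such as 'parquet.compression'='SNAPPY' reaches the writer the same way a job-level setting would.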
public static final int PRECISION_TO_BYTE_COUNT[] = new int[38]; static { @@ -99,6 +106,7 @@ private LAST_OPERATION status; private long serializedSize; private long deserializedSize; + private String compressionType; @Override public final void initialize(final Configuration conf, final Properties tbl) throws SerDeException { @@ -110,6 +118,9 @@ public final void initialize(final Configuration conf, final Properties tbl) thr final String columnNameProperty = tbl.getProperty(serdeConstants.LIST_COLUMNS); final String columnTypeProperty = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES); + // Get compression properties + compressionType = tbl.getProperty(ParquetOutputFormat.COMPRESSION, DEFAULTCOMPRESSION); + if (columnNameProperty.length() == 0) { columnNames = new ArrayList(); } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java index 765b5ac..e52c4bc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/write/ParquetRecordWriterWrapper.java @@ -18,10 +18,12 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.ArrayWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapred.RecordWriter; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapreduce.OutputFormat; @@ -55,21 +57,13 @@ public ParquetRecordWriterWrapper( } taskContext = ContextUtil.newTaskAttemptContext(jobConf, taskAttemptID); + LOG.info("initialize serde with table properties."); + initializeSerProperties(taskContext, tableProperties); + LOG.info("creating real writer to write at " + name); - String compressionName = tableProperties.getProperty(ParquetOutputFormat.COMPRESSION); - if (compressionName != null && !compressionName.isEmpty()) { - //get override compression properties via "tblproperties" clause if it is set - LOG.debug("get override compression properties via tblproperties"); - - ContextUtil.getConfiguration(taskContext); - CompressionCodecName codecName = CompressionCodecName.fromConf(compressionName); - realWriter = ((ParquetOutputFormat) realOutputFormat).getRecordWriter(jobConf, - new Path(name), codecName); - } else { - realWriter = ((ParquetOutputFormat) realOutputFormat).getRecordWriter(taskContext, - new Path(name)); - } + realWriter = + ((ParquetOutputFormat) realOutputFormat).getRecordWriter(taskContext, new Path(name)); LOG.info("real writer: " + realWriter); } catch (final InterruptedException e) { @@ -77,6 +71,31 @@ public ParquetRecordWriterWrapper( } } + private void initializeSerProperties(JobContext job, Properties tableProperties) { + String blockSize = tableProperties.getProperty(ParquetOutputFormat.BLOCK_SIZE); + Configuration conf = ContextUtil.getConfiguration(job); + if (blockSize != null && !blockSize.isEmpty()) { + LOG.debug("get override parquet.block.size property via tblproperties"); + conf.setInt(ParquetOutputFormat.BLOCK_SIZE, Integer.valueOf(blockSize)); + } + + String enableDictionaryPage = + tableProperties.getProperty(ParquetOutputFormat.ENABLE_DICTIONARY); + if (enableDictionaryPage != null && !enableDictionaryPage.isEmpty()) { + LOG.debug("get override 
parquet.enable.dictionary property via tblproperties"); + conf.setBoolean(ParquetOutputFormat.ENABLE_DICTIONARY, + Boolean.valueOf(enableDictionaryPage)); + } + + String compressionName = tableProperties.getProperty(ParquetOutputFormat.COMPRESSION); + if (compressionName != null && !compressionName.isEmpty()) { + //get override compression properties via "tblproperties" clause if it is set + LOG.debug("get override compression properties via tblproperties"); + CompressionCodecName codecName = CompressionCodecName.fromConf(compressionName); + conf.set(ParquetOutputFormat.COMPRESSION, codecName.name()); + } + } + @Override public void close(final Reporter reporter) throws IOException { try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java index 8ffd1a1..f4a2e65 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentFactory.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.io.sarg; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java b/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java index aac31fa..9d2090d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lib/ForwardWalker.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.ql.lib; import org.apache.hadoop.hive.ql.exec.Operator; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index b900627..ee8d295 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -92,6 +92,7 @@ import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -748,8 +749,9 @@ public void createIndex(String tableName, String indexName, String indexHandlerC throw new HiveException("Table name " + indexTblName + " already exists. Choose another name."); } - org.apache.hadoop.hive.metastore.api.StorageDescriptor storageDescriptor = baseTbl.getSd().deepCopy(); - SerDeInfo serdeInfo = storageDescriptor.getSerdeInfo(); + SerDeInfo serdeInfo = new SerDeInfo(); + serdeInfo.setName(indexTblName); + if(serde != null) { serdeInfo.setSerializationLib(serde); } else { @@ -762,6 +764,7 @@ public void createIndex(String tableName, String indexName, String indexHandlerC } } + serdeInfo.setParameters(new HashMap()); if (fieldDelim != null) { serdeInfo.getParameters().put(FIELD_DELIM, fieldDelim); serdeInfo.getParameters().put(SERIALIZATION_FORMAT, fieldDelim); @@ -788,18 +791,8 @@ public void createIndex(String tableName, String indexName, String indexHandlerC } } - storageDescriptor.setLocation(null); - if (location != null) { - storageDescriptor.setLocation(location); - } - storageDescriptor.setInputFormat(inputFormat); - storageDescriptor.setOutputFormat(outputFormat); - - Map params = new HashMap(); - List indexTblCols = new ArrayList(); List sortCols = new ArrayList(); - storageDescriptor.setBucketCols(null); int k = 0; Table metaBaseTbl = new Table(baseTbl); for (int i = 0; i < metaBaseTbl.getCols().size(); i++) { @@ -815,9 +808,6 @@ public void createIndex(String tableName, String indexName, String indexHandlerC "Check the index columns, they should appear in the table being indexed."); } - storageDescriptor.setCols(indexTblCols); - storageDescriptor.setSortCols(sortCols); - int time = (int) (System.currentTimeMillis() / 1000); org.apache.hadoop.hive.metastore.api.Table tt = null; HiveIndexHandler indexHandler = HiveUtils.getIndexHandler(this.getConf(), indexHandlerClass); @@ -851,8 +841,21 @@ public void createIndex(String tableName, String indexName, String indexHandlerC String tdname = Utilities.getDatabaseName(tableName); String ttname = Utilities.getTableName(tableName); + + StorageDescriptor indexSd = new StorageDescriptor( + indexTblCols, + location, + inputFormat, + outputFormat, + false/*compressed - not used*/, + -1/*numBuckets - default is -1 when the table has no buckets*/, + serdeInfo, + null/*bucketCols*/, + sortCols, + null/*parameters*/); + Index indexDesc = new Index(indexName, indexHandlerClass, tdname, ttname, time, time, indexTblName, - storageDescriptor, params, deferredRebuild); + indexSd, new HashMap(), deferredRebuild); if (indexComment != null) { indexDesc.getParameters().put("comment", indexComment); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java 
b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index ecd376d..f1f723c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.metadata; import java.io.IOException; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java index bedc3ac..04bafda 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java @@ -240,6 +240,9 @@ private void convertJoinSMBJoin(JoinOperator joinOp, OptimizeTezProcContext cont new MapJoinDesc(null, null, joinDesc.getExprs(), null, null, joinDesc.getOutputColumnNames(), mapJoinConversionPos, joinDesc.getConds(), joinDesc.getFilters(), joinDesc.getNoOuterJoin(), null); + mapJoinDesc.setNullSafes(joinDesc.getNullSafes()); + mapJoinDesc.setFilterMap(joinDesc.getFilterMap()); + mapJoinDesc.resetOrder(); } @SuppressWarnings("unchecked") diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java index e644481..5b16e5b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MergeJoinProc.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.ql.optimizer; import java.util.Stack; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/AnnotateWithOpTraits.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/AnnotateWithOpTraits.java index da91d38..c304e97 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/AnnotateWithOpTraits.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/metainfo/annotation/AnnotateWithOpTraits.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.optimizer.metainfo.annotation; import java.util.ArrayList; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java index 9b653d3..4b44a28 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/TraitsUtil.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.optimizer.optiq; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java index 15596bc..5deb801 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/cost/HiveVolcanoPlanner.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.optimizer.optiq.cost; import org.eigenbase.rel.RelCollationTraitDef; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java index ec72047..1c483ea 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/rules/HivePushFilterPastJoinRule.java @@ -38,114 +38,121 @@ public abstract class HivePushFilterPastJoinRule extends PushFilterPastJoinRule { - public static final HivePushFilterPastJoinRule FILTER_ON_JOIN = new HivePushFilterIntoJoinRule(); - - public static final HivePushFilterPastJoinRule JOIN = new HivePushDownJoinConditionRule(); - - /** - * Creates a PushFilterPastJoinRule with an explicit root operand. - */ - protected HivePushFilterPastJoinRule(RelOptRuleOperand operand, String id, - boolean smart, RelFactories.FilterFactory filterFactory, - RelFactories.ProjectFactory projectFactory) { - super(operand, id, smart, filterFactory, projectFactory); - } - - /** - * Rule that tries to push filter expressions into a join condition and into - * the inputs of the join. - */ - public static class HivePushFilterIntoJoinRule extends - HivePushFilterPastJoinRule { - public HivePushFilterIntoJoinRule() { - super(RelOptRule.operand(FilterRelBase.class, - RelOptRule.operand(JoinRelBase.class, RelOptRule.any())), - "HivePushFilterPastJoinRule:filter", true, - HiveFilterRel.DEFAULT_FILTER_FACTORY, - HiveProjectRel.DEFAULT_PROJECT_FACTORY); - } - - @Override - public void onMatch(RelOptRuleCall call) { - FilterRelBase filter = call.rel(0); - JoinRelBase join = call.rel(1); - super.perform(call, filter, join); - } - } - - public static class HivePushDownJoinConditionRule extends - HivePushFilterPastJoinRule { - public HivePushDownJoinConditionRule() { - super(RelOptRule.operand(JoinRelBase.class, RelOptRule.any()), - "HivePushFilterPastJoinRule:no-filter", true, - HiveFilterRel.DEFAULT_FILTER_FACTORY, - HiveProjectRel.DEFAULT_PROJECT_FACTORY); - } - - @Override - public void onMatch(RelOptRuleCall call) { - JoinRelBase join = call.rel(0); - super.perform(call, null, join); - } - } - - /* - * Any predicates pushed down to joinFilters that aren't equality - * conditions: put them back as aboveFilters because Hive doesn't support - * not equi join conditions. - */ - @Override - protected void validateJoinFilters(List aboveFilters, - List joinFilters, JoinRelBase join, JoinRelType joinType) { - if (joinType.equals(JoinRelType.INNER)) { - ListIterator filterIter = joinFilters.listIterator(); - while (filterIter.hasNext()) { - RexNode exp = filterIter.next(); - if (exp instanceof RexCall) { - RexCall c = (RexCall) exp; - if (c.getOperator().getKind() == SqlKind.EQUALS) { - boolean validHiveJoinFilter = true; - for (RexNode rn : c.getOperands()) { - // NOTE: Hive dis-allows projections from both left - // & - // right side - // of join condition. 
Example: Hive disallows - // (r1.x=r2.x)=(r1.y=r2.y) on join condition. - if (filterRefersToBothSidesOfJoin(rn, join)) { - validHiveJoinFilter = false; - break; - } - } - if (validHiveJoinFilter) - continue; - } - } - aboveFilters.add(exp); - filterIter.remove(); - } - } - } - - private boolean filterRefersToBothSidesOfJoin(RexNode filter, JoinRelBase j) { - boolean refersToBothSides = false; - - int joinNoOfProjects = j.getRowType().getFieldCount(); - BitSet filterProjs = new BitSet(joinNoOfProjects); - BitSet allLeftProjs = new BitSet(joinNoOfProjects); - BitSet allRightProjs = new BitSet(joinNoOfProjects); - allLeftProjs.set(0, j.getInput(0).getRowType().getFieldCount(), true); - allRightProjs.set(j.getInput(0).getRowType().getFieldCount(), - joinNoOfProjects, true); - - InputFinder inputFinder = new InputFinder(filterProjs); - filter.accept(inputFinder); - - if (allLeftProjs.intersects(filterProjs) - && allRightProjs.intersects(filterProjs)) - refersToBothSides = true; - - return refersToBothSides; - } + public static final HivePushFilterPastJoinRule FILTER_ON_JOIN = new HivePushFilterIntoJoinRule(); + + public static final HivePushFilterPastJoinRule JOIN = new HivePushDownJoinConditionRule(); + + /** + * Creates a PushFilterPastJoinRule with an explicit root operand. + */ + protected HivePushFilterPastJoinRule(RelOptRuleOperand operand, String id, boolean smart, + RelFactories.FilterFactory filterFactory, RelFactories.ProjectFactory projectFactory) { + super(operand, id, smart, filterFactory, projectFactory); + } + + /** + * Rule that tries to push filter expressions into a join condition and into + * the inputs of the join. + */ + public static class HivePushFilterIntoJoinRule extends HivePushFilterPastJoinRule { + public HivePushFilterIntoJoinRule() { + super(RelOptRule.operand(FilterRelBase.class, + RelOptRule.operand(JoinRelBase.class, RelOptRule.any())), + "HivePushFilterPastJoinRule:filter", true, HiveFilterRel.DEFAULT_FILTER_FACTORY, + HiveProjectRel.DEFAULT_PROJECT_FACTORY); + } + + @Override + public void onMatch(RelOptRuleCall call) { + FilterRelBase filter = call.rel(0); + JoinRelBase join = call.rel(1); + super.perform(call, filter, join); + } + } + + public static class HivePushDownJoinConditionRule extends HivePushFilterPastJoinRule { + public HivePushDownJoinConditionRule() { + super(RelOptRule.operand(JoinRelBase.class, RelOptRule.any()), + "HivePushFilterPastJoinRule:no-filter", true, HiveFilterRel.DEFAULT_FILTER_FACTORY, + HiveProjectRel.DEFAULT_PROJECT_FACTORY); + } + + @Override + public void onMatch(RelOptRuleCall call) { + JoinRelBase join = call.rel(0); + super.perform(call, null, join); + } + } + + /* + * Any predicates pushed down to joinFilters that aren't equality conditions: + * put them back as aboveFilters because Hive doesn't support not equi join + * conditions. + */ + @Override + protected void validateJoinFilters(List aboveFilters, List joinFilters, + JoinRelBase join, JoinRelType joinType) { + if (joinType.equals(JoinRelType.INNER)) { + ListIterator filterIter = joinFilters.listIterator(); + while (filterIter.hasNext()) { + RexNode exp = filterIter.next(); + + if (exp instanceof RexCall) { + RexCall c = (RexCall) exp; + boolean validHiveJoinFilter = false; + + if ((c.getOperator().getKind() == SqlKind.EQUALS)) { + validHiveJoinFilter = true; + for (RexNode rn : c.getOperands()) { + // NOTE: Hive dis-allows projections from both left & right side + // of join condition. Example: Hive disallows + // (r1.x +r2.x)=(r1.y+r2.y) on join condition. 
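The validateJoinFilters rewrite in this hunk broadens which pushed-down predicates may stay in the join condition: equality predicates whose individual operands each touch only one join input, and now also <, >, <= and >= comparisons, provided the comparison does not reference columns from both sides (for example r1.x < r2.x is pushed back above the join). The sketch below restates that classification in a dependency-free form; the Kind enum and the BitSet side test are simplified stand-ins for SqlKind and the RexNode/InputFinder machinery, and every name here is illustrative rather than Hive API.

```java
import java.util.BitSet;

// Standalone sketch of the join-filter classification in validateJoinFilters:
// a predicate may stay in the join condition only if Hive can execute it there.
public class JoinFilterClassifierSketch {

  enum Kind { EQUALS, LESS_THAN, GREATER_THAN, LESS_THAN_OR_EQUAL, GREATER_THAN_OR_EQUAL, OTHER }

  /** True if the referenced columns span both join inputs (columns are numbered
   *  0..leftFieldCount-1 for the left input, the rest for the right input). */
  static boolean refersToBothSides(BitSet refs, int leftFieldCount, int totalFieldCount) {
    BitSet left = new BitSet(totalFieldCount);
    left.set(0, leftFieldCount);
    BitSet right = new BitSet(totalFieldCount);
    right.set(leftFieldCount, totalFieldCount);
    return refs.intersects(left) && refs.intersects(right);
  }

  /**
   * Equality predicates are kept unless a single operand mixes columns from both
   * sides; inequality comparisons are kept only when the whole predicate touches
   * a single side. Everything else is pushed back above the join.
   */
  static boolean isValidHiveJoinFilter(Kind kind, BitSet[] operandRefs, BitSet allRefs,
      int leftFieldCount, int totalFieldCount) {
    switch (kind) {
      case EQUALS:
        for (BitSet operand : operandRefs) {
          if (refersToBothSides(operand, leftFieldCount, totalFieldCount)) {
            return false; // e.g. (r1.x + r2.x) = (r1.y + r2.y)
          }
        }
        return true;      // e.g. r1.x = r2.x
      case LESS_THAN:
      case GREATER_THAN:
      case LESS_THAN_OR_EQUAL:
      case GREATER_THAN_OR_EQUAL:
        return !refersToBothSides(allRefs, leftFieldCount, totalFieldCount); // rejects r1.x < r2.x
      default:
        return false;
    }
  }

  public static void main(String[] args) {
    int leftFields = 2, totalFields = 4; // r1 has columns 0-1, r2 has columns 2-3
    BitSet leftCol = bits(0), rightCol = bits(2), both = bits(0, 2);
    System.out.println(isValidHiveJoinFilter(Kind.EQUALS,
        new BitSet[] { leftCol, rightCol }, both, leftFields, totalFields)); // true:  r1.x = r2.x
    System.out.println(isValidHiveJoinFilter(Kind.LESS_THAN,
        new BitSet[] { leftCol, rightCol }, both, leftFields, totalFields)); // false: r1.x < r2.x
  }

  static BitSet bits(int... idx) {
    BitSet b = new BitSet();
    for (int i : idx) { b.set(i); }
    return b;
  }
}
```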
+ if (filterRefersToBothSidesOfJoin(rn, join)) { + validHiveJoinFilter = false; + break; + } + } + } else if ((c.getOperator().getKind() == SqlKind.LESS_THAN) + || (c.getOperator().getKind() == SqlKind.GREATER_THAN) + || (c.getOperator().getKind() == SqlKind.LESS_THAN_OR_EQUAL) + || (c.getOperator().getKind() == SqlKind.GREATER_THAN_OR_EQUAL)) { + validHiveJoinFilter = true; + // NOTE: Hive disallows projections from both left & right side of + // a join in inequality conditions. Example: Hive disallows (r1.x < + // r2.x) on join condition. + if (filterRefersToBothSidesOfJoin(c, join)) { + validHiveJoinFilter = false; + } + } + + if (validHiveJoinFilter) + continue; + } + + aboveFilters.add(exp); + filterIter.remove(); + } + } + } + + private boolean filterRefersToBothSidesOfJoin(RexNode filter, JoinRelBase j) { + boolean refersToBothSides = false; + + int joinNoOfProjects = j.getRowType().getFieldCount(); + BitSet filterProjs = new BitSet(joinNoOfProjects); + BitSet allLeftProjs = new BitSet(joinNoOfProjects); + BitSet allRightProjs = new BitSet(joinNoOfProjects); + allLeftProjs.set(0, j.getInput(0).getRowType().getFieldCount(), true); + allRightProjs.set(j.getInput(0).getRowType().getFieldCount(), joinNoOfProjects, true); + + InputFinder inputFinder = new InputFinder(filterProjs); + filter.accept(inputFinder); + + if (allLeftProjs.intersects(filterProjs) && allRightProjs.intersects(filterProjs)) + refersToBothSides = true; + + return refersToBothSides; + } } // End PushFilterPastJoinRule.java diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java index ba07363..28bf2ad 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/FilterSelectivityEstimator.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hive.ql.optimizer.optiq.stats; import java.util.BitSet; +import java.util.HashSet; +import java.util.List; +import java.util.Set; import org.apache.hadoop.hive.ql.optimizer.optiq.RelOptHiveTable; import org.apache.hadoop.hive.ql.optimizer.optiq.reloperators.HiveTableScanRel; @@ -32,6 +35,10 @@ import org.eigenbase.rex.RexNode; import org.eigenbase.rex.RexVisitorImpl; import org.eigenbase.sql.SqlKind; +import org.eigenbase.sql.SqlOperator; +import org.eigenbase.sql.type.SqlTypeUtil; + +import com.google.common.collect.Sets; public class FilterSelectivityEstimator extends RexVisitorImpl { private final RelNode childRel; @@ -61,7 +68,7 @@ public Double visitCall(RexCall call) { } Double selectivity = null; - SqlKind op = call.getKind(); + SqlKind op = getOp(call); switch (op) { case AND: { @@ -74,6 +81,7 @@ public Double visitCall(RexCall call) { break; } + case NOT: case NOT_EQUALS: { selectivity = computeNotEqualitySelectivity(call); break; @@ -88,7 +96,16 @@ public Double visitCall(RexCall call) { } case IN: { - selectivity = ((double) 1 / ((double) call.operands.size())); + // TODO: 1) check for duplicates 2) We assume IN clause values to be + // present in NDV, which may not be correct (Range check can find it) 3) We + // assume values in the NDV set are uniformly distributed over col values + // (account for skewness - histogram).
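The new IN-clause branch in this FilterSelectivityEstimator hunk replaces the old uniform 1/(number of IN values) guess: it multiplies the per-value selectivity returned by computeFunctionSelectivity(call) by call.operands.size() - 1 (operand 0 is the column itself) and clamps the result, under the assumption stated in the TODO above that the IN values actually occur in the column and are uniformly distributed. A small arithmetic sketch of that estimate; perValueSelectivity stands in for computeFunctionSelectivity(call), and treating it as roughly 1/NDV of the column is an assumption made only for the example's numbers.

```java
// Sketch of the IN-clause selectivity estimate introduced in this hunk.
// perValueSelectivity plays the role of computeFunctionSelectivity(call);
// numInValues corresponds to call.operands.size() - 1.
public class InSelectivitySketch {

  static double inClauseSelectivity(double perValueSelectivity, int numInValues) {
    double selectivity = perValueSelectivity * numInValues;
    if (selectivity <= 0.0) {
      return 0.10; // fall back to a coarse default when stats are missing or degenerate
    }
    if (selectivity >= 1.0) {
      return 1.0;  // never claim more than the whole input
    }
    return selectivity;
  }

  public static void main(String[] args) {
    // column with ~1000 distinct values, predicate "col IN (v1, v2, v3)"
    System.out.println(inClauseSelectivity(1.0 / 1000, 3)); // ~0.003
    // tiny NDV: 2 distinct values and 5 IN values saturates at 1.0
    System.out.println(inClauseSelectivity(1.0 / 2, 5));    // 1.0
    // no usable statistics
    System.out.println(inClauseSelectivity(0.0, 3));        // 0.1
  }
}
```

The clamp keeps the estimate usable when column statistics are missing (the raw product would be 0) or when the IN list is larger than the number of distinct values.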
+ selectivity = computeFunctionSelectivity(call) * (call.operands.size() - 1); + if (selectivity <= 0.0) { + selectivity = 0.10; + } else if (selectivity >= 1.0) { + selectivity = 1.0; + } break; } @@ -152,18 +169,19 @@ private Double computeDisjunctionSelectivity(RexCall call) { } tmpCardinality = childCardinality * tmpSelectivity; - if (tmpCardinality > 1) + if (tmpCardinality > 1 && tmpCardinality < childCardinality) { tmpSelectivity = (1 - tmpCardinality / childCardinality); - else + } else { tmpSelectivity = 1.0; + } selectivity *= tmpSelectivity; } - if (selectivity > 1) - return (1 - selectivity); - else - return 1.0; + if (selectivity < 0.0) + selectivity = 0.0; + + return (1 - selectivity); } /** @@ -225,4 +243,19 @@ private boolean isPartitionPredicate(RexNode expr, RelNode r) { } return false; } + + private SqlKind getOp(RexCall call) { + SqlKind op = call.getKind(); + + if (call.getKind().equals(SqlKind.OTHER_FUNCTION) + && SqlTypeUtil.inBooleanFamily(call.getType())) { + SqlOperator sqlOp = call.getOperator(); + String opName = (sqlOp != null) ? sqlOp.getName() : ""; + if (opName.equalsIgnoreCase("in")) { + op = SqlKind.IN; + } + } + + return op; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdUniqueKeys.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdUniqueKeys.java index 3221f91..c3c8bdd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdUniqueKeys.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/stats/HiveRelMdUniqueKeys.java @@ -38,6 +38,7 @@ import org.eigenbase.rel.metadata.ReflectiveRelMetadataProvider; import org.eigenbase.rel.metadata.RelMdUniqueKeys; import org.eigenbase.rel.metadata.RelMetadataProvider; +import org.eigenbase.relopt.RelOptUtil; import org.eigenbase.relopt.hep.HepRelVertex; import org.eigenbase.rex.RexInputRef; import org.eigenbase.rex.RexNode; @@ -100,7 +101,7 @@ cStat.getRange().minValue != null) { double r = cStat.getRange().maxValue.doubleValue() - cStat.getRange().minValue.doubleValue() + 1; - isKey = (numRows == r); + isKey = (Math.abs(numRows - r) < RelOptUtil.EPSILON); } if ( isKey ) { BitSet key = new BitSet(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java index c6efff6..7f52c29 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/SqlFunctionConverter.java @@ -278,6 +278,7 @@ private static String getName(GenericUDF hiveUDF) { registerFunction(">=", SqlStdOperatorTable.GREATER_THAN_OR_EQUAL, hToken(HiveParser.GREATERTHANOREQUALTO, ">=")); registerFunction("!", SqlStdOperatorTable.NOT, hToken(HiveParser.KW_NOT, "not")); + registerFunction("<>", SqlStdOperatorTable.NOT_EQUALS, hToken(HiveParser.NOTEQUAL, "<>")); } private void registerFunction(String name, SqlOperator optiqFn, HiveToken hiveToken) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java index 6159c7d..dc5d2df 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartExprEvalUtils.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import 
org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; public class PartExprEvalUtils { @@ -103,11 +104,13 @@ static synchronized public Object evalExprWithPart(ExprNodeDesc expr, } static synchronized public ObjectPair prepareExpr( - ExprNodeGenericFuncDesc expr, List partNames) throws HiveException { + ExprNodeGenericFuncDesc expr, List partNames, + List partColumnTypeInfos) throws HiveException { // Create the row object List partObjectInspectors = new ArrayList(); for (int i = 0; i < partNames.size(); i++) { - partObjectInspectors.add(PrimitiveObjectInspectorFactory.javaStringObjectInspector); + partObjectInspectors.add(PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector( + partColumnTypeInfos.get(i))); } StructObjectInspector objectInspector = ObjectInspectorFactory .getStandardStructObjectInspector(partNames, partObjectInspectors); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionExpressionForMetastore.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionExpressionForMetastore.java index d98b5c5..9ffa177 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionExpressionForMetastore.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionExpressionForMetastore.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; /** * The basic implementation of PartitionExpressionProxy that uses ql package classes. 
@@ -40,13 +41,14 @@ public String convertExprToFilter(byte[] exprBytes) throws MetaException { } @Override - public boolean filterPartitionsByExpr(List columnNames, byte[] exprBytes, + public boolean filterPartitionsByExpr(List partColumnNames, + List partColumnTypeInfos, byte[] exprBytes, String defaultPartitionName, List partitionNames) throws MetaException { ExprNodeGenericFuncDesc expr = deserializeExpr(exprBytes); try { long startTime = System.nanoTime(), len = partitionNames.size(); boolean result = PartitionPruner.prunePartitionNames( - columnNames, expr, defaultPartitionName, partitionNames); + partColumnNames, partColumnTypeInfos, expr, defaultPartitionName, partitionNames); double timeMs = (System.nanoTime() - startTime) / 1000000.0; LOG.debug("Pruning " + len + " partition names took " + timeMs + "ms"); return result; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java index 1796b7b..4b2a81a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java @@ -57,7 +57,9 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr; import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; @@ -420,9 +422,10 @@ static private boolean pruneBySequentialScan(Table tab, List partitio String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); List partCols = extractPartColNames(tab); + List partColTypeInfos = extractPartColTypes(tab); boolean hasUnknownPartitions = prunePartitionNames( - partCols, prunerExpr, defaultPartitionName, partNames); + partCols, partColTypeInfos, prunerExpr, defaultPartitionName, partNames); perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PRUNE_LISTING); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARTITION_RETRIEVING); @@ -442,19 +445,30 @@ static private boolean pruneBySequentialScan(Table tab, List partitio return partCols; } + private static List extractPartColTypes(Table tab) { + List pCols = tab.getPartCols(); + List partColTypeInfos = new ArrayList(pCols.size()); + for (FieldSchema pCol : pCols) { + partColTypeInfos.add(TypeInfoFactory.getPrimitiveTypeInfo(pCol.getType())); + } + return partColTypeInfos; + } + /** * Prunes partition names to see if they match the prune expression. - * @param columnNames name of partition columns + * @param partColumnNames name of partition columns + * @param partColumnTypeInfos types of partition columns * @param prunerExpr The expression to match. * @param defaultPartitionName name of default partition * @param partNames Partition names to filter. The list is modified in place. * @return Whether the list has any partitions for which the expression may or may not match. 
*/ - public static boolean prunePartitionNames(List columnNames, ExprNodeGenericFuncDesc prunerExpr, + public static boolean prunePartitionNames(List partColumnNames, + List partColumnTypeInfos, ExprNodeGenericFuncDesc prunerExpr, String defaultPartitionName, List partNames) throws HiveException, MetaException { // Prepare the expression to filter on the columns. ObjectPair handle = - PartExprEvalUtils.prepareExpr(prunerExpr, columnNames); + PartExprEvalUtils.prepareExpr(prunerExpr, partColumnNames, partColumnTypeInfos); // Filter the name list. Removing elements one by one can be slow on e.g. ArrayList, // so let's create a new list and copy it if we don't have a linked list @@ -462,8 +476,8 @@ public static boolean prunePartitionNames(List columnNames, ExprNodeGene List partNamesSeq = inPlace ? partNames : new LinkedList(partNames); // Array for the values to pass to evaluator. - ArrayList values = new ArrayList(columnNames.size()); - for (int i = 0; i < columnNames.size(); ++i) { + ArrayList values = new ArrayList(partColumnNames.size()); + for (int i = 0; i < partColumnNames.size(); ++i) { values.add(null); } @@ -473,8 +487,17 @@ public static boolean prunePartitionNames(List columnNames, ExprNodeGene String partName = partIter.next(); Warehouse.makeValsFromName(partName, values); + ArrayList convertedValues = new ArrayList(values.size()); + for(int i=0; i authProviderClass = conf. + getClass(HiveConf.ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY.varname, + HiveAuthorizationTaskFactoryImpl.class, + HiveAuthorizationTaskFactory.class); + String msg = "Unable to create instance of " + authProviderClass.getName() + ": "; + try { + Constructor constructor = + authProviderClass.getConstructor(HiveConf.class, Hive.class); + return constructor.newInstance(conf, db); + } catch (NoSuchMethodException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (SecurityException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (InstantiationException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (IllegalAccessException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (IllegalArgumentException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (InvocationTargetException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g index ce05fff..f412010 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g @@ -283,6 +283,8 @@ KW_ROLE: 'ROLE'; KW_ROLES: 'ROLES'; KW_INNER: 'INNER'; KW_EXCHANGE: 'EXCHANGE'; +KW_URI: 'URI'; +KW_SERVER : 'SERVER'; KW_ADMIN: 'ADMIN'; KW_OWNER: 'OWNER'; KW_PRINCIPALS: 'PRINCIPALS'; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index c903e8f..f1365fa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -109,6 +109,7 @@ TOK_DATE; TOK_DATELITERAL; TOK_DATETIME; TOK_TIMESTAMP; +TOK_TIMESTAMPLITERAL; TOK_STRING; TOK_CHAR; TOK_VARCHAR; @@ -340,6 +341,8 @@ TOK_VIRTUAL_TABLE; TOK_VIRTUAL_TABREF; TOK_ANONYMOUS; TOK_COL_NAME; +TOK_URI_TYPE; +TOK_SERVER_TYPE; } @@ -1485,11 +1488,15 @@ privilegeObject privObject : (KW_DATABASE|KW_SCHEMA) identifier -> ^(TOK_DB_TYPE identifier) | KW_TABLE? 
tableName partitionSpec? -> ^(TOK_TABLE_TYPE tableName partitionSpec?) + | KW_URI (path=StringLiteral) -> ^(TOK_URI_TYPE $path) + | KW_SERVER identifier -> ^(TOK_SERVER_TYPE identifier) ; privObjectCols : (KW_DATABASE|KW_SCHEMA) identifier -> ^(TOK_DB_TYPE identifier) | KW_TABLE? tableName (LPAREN cols=columnNameList RPAREN)? partitionSpec? -> ^(TOK_TABLE_TYPE tableName $cols? partitionSpec?) + | KW_URI (path=StringLiteral) -> ^(TOK_URI_TYPE $path) + | KW_SERVER identifier -> ^(TOK_SERVER_TYPE identifier) ; privilegeList diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g index 13d5255..c960a6b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g @@ -218,6 +218,7 @@ constant : Number | dateLiteral + | timestampLiteral | StringLiteral | stringLiteralSequence | BigintLiteral @@ -250,6 +251,14 @@ dateLiteral } ; +timestampLiteral + : + KW_TIMESTAMP StringLiteral -> + { + adaptor.create(TOK_TIMESTAMPLITERAL, $StringLiteral.text) + } + ; + expression @init { gParent.pushMsg("expression specification", state); } @after { gParent.popMsg(state); } @@ -260,7 +269,6 @@ expression atomExpression : KW_NULL -> TOK_NULL - | dateLiteral | constant | castExpression | caseExpression @@ -545,5 +553,5 @@ principalIdentifier nonReserved : - KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI 
| KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS | KW_REWRITE | KW_AUTHORIZATION | KW_VALUES + KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS | KW_REWRITE | KW_AUTHORIZATION | KW_VALUES | KW_URI | KW_SERVER ; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 88fd0fc..016a6d8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -51,6 +51,7 @@ import 
org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryProperties; import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator; @@ -318,9 +319,6 @@ private static final String VALUES_TMP_TABLE_NAME_PREFIX = "Values__Tmp__Table__"; - @VisibleForTesting - static final String ACID_TABLE_PROPERTY = "transactional"; - private HashMap opToPartPruner; private HashMap opToPartList; private HashMap> topOps; @@ -2764,11 +2762,14 @@ private Operator genNotNullFilterForJoinSourcePlan(QB qb, Operator input, // TODO: make aliases unique, otherwise needless rewriting takes place private Integer genColListRegex(String colRegex, String tabAlias, ASTNode sel, ArrayList col_list, HashSet excludeCols, RowResolver input, - Integer pos, RowResolver output, List aliases, boolean ensureUniqueCols) - throws SemanticException { + RowResolver colSrcRR, Integer pos, RowResolver output, List aliases, + boolean ensureUniqueCols) throws SemanticException { + if (colSrcRR == null) { + colSrcRR = input; + } // The table alias should exist - if (tabAlias != null && !input.hasTableAlias(tabAlias)) { + if (tabAlias != null && !colSrcRR.hasTableAlias(tabAlias)) { throw new SemanticException(ErrorMsg.INVALID_TABLE_ALIAS.getMsg(sel)); } @@ -2797,7 +2798,7 @@ private Integer genColListRegex(String colRegex, String tabAlias, ASTNode sel, // For expr "*", aliases should be iterated in the order they are specified // in the query. for (String alias : aliases) { - HashMap fMap = input.getFieldMap(alias); + HashMap fMap = colSrcRR.getFieldMap(alias); if (fMap == null) { continue; } @@ -2808,8 +2809,11 @@ private Integer genColListRegex(String colRegex, String tabAlias, ASTNode sel, if (excludeCols != null && excludeCols.contains(colInfo)) { continue; // This was added during plan generation. } + // First, look up the column from the source against which * is to be resolved. + // We'd later translated this into the column from proper input, if it's valid. + // TODO: excludeCols may be possible to remove using the same technique. String name = colInfo.getInternalName(); - String[] tmp = input.reverseLookup(name); + String[] tmp = colSrcRR.reverseLookup(name); // Skip the colinfos which are not for this particular alias if (tabAlias != null && !tmp[0].equalsIgnoreCase(tabAlias)) { @@ -2825,6 +2829,27 @@ private Integer genColListRegex(String colRegex, String tabAlias, ASTNode sel, continue; } + // If input (GBY) is different than the source of columns, find the same column in input. + // TODO: This is fraught with peril. + if (input != colSrcRR) { + colInfo = input.get(tabAlias, tmp[1]); + if (colInfo == null) { + LOG.error("Cannot find colInfo for " + tabAlias + "." + tmp[1] + + ", derived from [" + colSrcRR + "], in [" + input + "]"); + throw new SemanticException(ErrorMsg.NON_KEY_EXPR_IN_GROUPBY, tmp[1]); + } + String oldCol = null; + if (LOG.isDebugEnabled()) { + oldCol = name + " => " + (tmp == null ? "null" : (tmp[0] + "." + tmp[1])); + } + name = colInfo.getInternalName(); + tmp = input.reverseLookup(name); + if (LOG.isDebugEnabled()) { + String newCol = name + " => " + (tmp == null ? "null" : (tmp[0] + "." 
+ tmp[1])); + LOG.debug("Translated [" + oldCol + "] to [" + newCol + "]"); + } + } + ColumnInfo oColInfo = inputColsProcessed.get(colInfo); if (oColInfo == null) { ExprNodeColumnDesc expr = new ExprNodeColumnDesc(colInfo.getType(), @@ -3423,11 +3448,10 @@ private static boolean isRegex(String pattern, HiveConf conf) { } - private Operator genSelectPlan(String dest, QB qb, Operator input) - throws SemanticException { + private Operator genSelectPlan(String dest, QB qb, Operator input, + Operator inputForSelectStar) throws SemanticException { ASTNode selExprList = qb.getParseInfo().getSelForClause(dest); - - Operator op = genSelectPlan(selExprList, qb, input, false); + Operator op = genSelectPlan(selExprList, qb, input, inputForSelectStar, false); if (LOG.isDebugEnabled()) { LOG.debug("Created Select Plan for clause: " + dest); @@ -3437,8 +3461,8 @@ private static boolean isRegex(String pattern, HiveConf conf) { } @SuppressWarnings("nls") - private Operator genSelectPlan(ASTNode selExprList, QB qb, - Operator input, boolean outerLV) throws SemanticException { + private Operator genSelectPlan(ASTNode selExprList, QB qb, Operator input, + Operator inputForSelectStar, boolean outerLV) throws SemanticException { if (LOG.isDebugEnabled()) { LOG.debug("tree: " + selExprList.toStringTree()); @@ -3449,6 +3473,10 @@ private static boolean isRegex(String pattern, HiveConf conf) { ASTNode trfm = null; Integer pos = Integer.valueOf(0); RowResolver inputRR = opParseCtx.get(input).getRowResolver(); + RowResolver starRR = null; + if (inputForSelectStar != null && inputForSelectStar != input) { + starRR = opParseCtx.get(inputForSelectStar).getRowResolver(); + } // SELECT * or SELECT TRANSFORM(*) boolean selectStar = false; int posn = 0; @@ -3494,7 +3522,7 @@ private static boolean isRegex(String pattern, HiveConf conf) { } if (isUDTF && (selectStar = udtfExprType == HiveParser.TOK_FUNCTIONSTAR)) { genColListRegex(".*", null, (ASTNode) udtfExpr.getChild(0), - col_list, null, inputRR, pos, out_rwsch, qb.getAliases(), false); + col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false); } } @@ -3547,7 +3575,7 @@ private static boolean isRegex(String pattern, HiveConf conf) { } if (LOG.isDebugEnabled()) { - LOG.debug("genSelectPlan: input = " + inputRR.toString()); + LOG.debug("genSelectPlan: input = " + inputRR + " starRr = " + starRR); } // For UDTF's, skip the function name to get the expressions @@ -3616,7 +3644,7 @@ private static boolean isRegex(String pattern, HiveConf conf) { if (expr.getType() == HiveParser.TOK_ALLCOLREF) { pos = genColListRegex(".*", expr.getChildCount() == 0 ? 
null : getUnescapedName((ASTNode) expr.getChild(0)).toLowerCase(), - expr, col_list, null, inputRR, pos, out_rwsch, qb.getAliases(), false); + expr, col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false); selectStar = true; } else if (expr.getType() == HiveParser.TOK_TABLE_OR_COL && !hasAsClause && !inputRR.getIsExprResolver() @@ -3625,7 +3653,7 @@ private static boolean isRegex(String pattern, HiveConf conf) { // This can only happen without AS clause // We don't allow this for ExprResolver - the Group By case pos = genColListRegex(unescapeIdentifier(expr.getChild(0).getText()), - null, expr, col_list, null, inputRR, pos, out_rwsch, qb.getAliases(), false); + null, expr, col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false); } else if (expr.getType() == HiveParser.DOT && expr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL && inputRR.hasTableAlias(unescapeIdentifier(expr.getChild(0) @@ -3637,7 +3665,7 @@ private static boolean isRegex(String pattern, HiveConf conf) { // We don't allow this for ExprResolver - the Group By case pos = genColListRegex(unescapeIdentifier(expr.getChild(1).getText()), unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase()), - expr, col_list, null, inputRR, pos, out_rwsch, qb.getAliases(), false); + expr, col_list, null, inputRR, starRR, pos, out_rwsch, qb.getAliases(), false); } else { // Case when this is an expression TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR); @@ -5165,7 +5193,8 @@ private Operator genGroupByPlan1ReduceMultiGBY(List dests, QB qb, Operat Operator groupByOperatorInfo = genGroupByPlanGroupByOperator(parseInfo, dest, curr, reduceSinkOperatorInfo, GroupByDesc.Mode.COMPLETE, null); - curr = genPostGroupByBodyPlan(groupByOperatorInfo, dest, qb, aliasToOpInfo); + // TODO: should we pass curr instead of null? 
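// Illustrative sketch (hypothetical names, plain maps in place of Hive's RowResolver): the
// genColListRegex changes above expand '*' against the row resolver of the operator feeding the
// GROUP BY (colSrcRR/starRR) and then translate each matched column into the internal name
// exposed by the GROUP BY output, failing with NON_KEY_EXPR_IN_GROUPBY when the column is not
// present there.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

class StarExpansionSketch {
  static List<String> expandStar(Map<String, String> colSrcRR,  // "alias.col" -> pre-GBY internal name
                                 Map<String, String> inputRR) { // "alias.col" -> post-GBY internal name
    List<String> resolved = new ArrayList<String>();
    for (Map.Entry<String, String> col : colSrcRR.entrySet()) {
      String translated = inputRR.get(col.getKey());
      if (translated == null) {
        throw new IllegalStateException("Expression not in GROUP BY key: " + col.getKey());
      }
      resolved.add(translated); // use the GBY output's internal name, not the pre-GBY source's
    }
    return resolved;
  }
}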
+ curr = genPostGroupByBodyPlan(groupByOperatorInfo, dest, qb, aliasToOpInfo, null); } return curr; @@ -8782,7 +8811,7 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT for (String dest : ks) { curr = input; curr = genGroupByPlan2MRMultiGroupBy(dest, qb, curr); - curr = genSelectPlan(dest, qb, curr); + curr = genSelectPlan(dest, qb, curr, null); // TODO: we may need to pass "input" here instead of null Integer limit = qbp.getDestLimit(dest); if (limit != null) { curr = genLimitMapRedPlan(dest, qb, curr, limit.intValue(), true); @@ -8839,6 +8868,8 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT ASTNode whereExpr = qb.getParseInfo().getWhrForClause(dest); curr = genFilterPlan((ASTNode) whereExpr.getChild(0), qb, curr, aliasToOpInfo, false); } + // Preserve operator before the GBY - we'll use it to resolve '*' + Operator gbySource = curr; if (qbp.getAggregationExprsForClause(dest).size() != 0 || getGroupByForClause(qbp, dest).size() > 0) { @@ -8863,8 +8894,12 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT curr = genGroupByPlan1MR(dest, qb, curr); } } + if (LOG.isDebugEnabled()) { + LOG.debug("RR before GB " + opParseCtx.get(gbySource).getRowResolver() + + " after GB " + opParseCtx.get(curr).getRowResolver()); + } - curr = genPostGroupByBodyPlan(curr, dest, qb, aliasToOpInfo); + curr = genPostGroupByBodyPlan(curr, dest, qb, aliasToOpInfo, gbySource); } } else { curr = genGroupByPlan1ReduceMultiGBY(commonGroupByDestGroup, qb, input, aliasToOpInfo); @@ -8891,7 +8926,7 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT } private Operator genPostGroupByBodyPlan(Operator curr, String dest, QB qb, - Map aliasToOpInfo) + Map aliasToOpInfo, Operator gbySource) throws SemanticException { QBParseInfo qbp = qb.getParseInfo(); @@ -8909,7 +8944,7 @@ private Operator genPostGroupByBodyPlan(Operator curr, String dest, QB qb, curr = genWindowingPlan(qb.getWindowingSpec(dest), curr); } - curr = genSelectPlan(dest, qb, curr); + curr = genSelectPlan(dest, qb, curr, gbySource); Integer limit = qbp.getDestLimit(dest); // Expressions are not supported currently without a alias. @@ -9866,7 +9901,7 @@ private Operator genLateralViewPlan(QB qb, Operator op, ASTNode lateralViewTree) // Get the UDTF Path QB blankQb = new QB(null, null, false); Operator udtfPath = genSelectPlan((ASTNode) lateralViewTree - .getChild(0), blankQb, lvForward, + .getChild(0), blankQb, lvForward, null, lateralViewTree.getType() == HiveParser.TOK_LATERAL_VIEW_OUTER); // add udtf aliases to QB for (String udtfAlias : blankQb.getAliases()) { @@ -12415,7 +12450,9 @@ else return (ltd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE : private boolean isAcidTable(Table tab) { if (tab == null) return false; if (!SessionState.get().getTxnMgr().supportsAcid()) return false; - return tab.getProperty(ACID_TABLE_PROPERTY) != null; + String tableIsTransactional = + tab.getProperty(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL); + return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true"); } private boolean isAcidOutputFormat(Class of) { @@ -14203,8 +14240,8 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticExcep if (expr.getType() == HiveParser.TOK_ALLCOLREF) { pos = genColListRegex(".*", expr.getChildCount() == 0 ? 
null : getUnescapedName((ASTNode) expr.getChild(0)) - .toLowerCase(), expr, col_list, excludedColumns, inputRR, pos, out_rwsch, - tabAliasesForAllProjs, true); + .toLowerCase(), expr, col_list, excludedColumns, inputRR, null, pos, + out_rwsch, tabAliasesForAllProjs, true); selectStar = true; } else if (expr.getType() == HiveParser.TOK_TABLE_OR_COL && !hasAsClause && !inputRR.getIsExprResolver() @@ -14213,7 +14250,8 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticExcep // This can only happen without AS clause // We don't allow this for ExprResolver - the Group By case pos = genColListRegex(unescapeIdentifier(expr.getChild(0).getText()), null, expr, - col_list, excludedColumns, inputRR, pos, out_rwsch, tabAliasesForAllProjs, true); + col_list, excludedColumns, inputRR, null, pos, out_rwsch, tabAliasesForAllProjs, + true); } else if (expr.getType() == HiveParser.DOT && expr.getChild(0).getType() == HiveParser.TOK_TABLE_OR_COL && inputRR.hasTableAlias(unescapeIdentifier(expr.getChild(0).getChild(0).getText() @@ -14224,7 +14262,8 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel) throws SemanticExcep // We don't allow this for ExprResolver - the Group By case pos = genColListRegex(unescapeIdentifier(expr.getChild(1).getText()), unescapeIdentifier(expr.getChild(0).getChild(0).getText().toLowerCase()), expr, - col_list, excludedColumns, inputRR, pos, out_rwsch, tabAliasesForAllProjs, true); + col_list, excludedColumns, inputRR, null, pos, out_rwsch, tabAliasesForAllProjs, + true); } else if (expr.toStringTree().contains("TOK_FUNCTIONDI") && !(srcRel instanceof HiveAggregateRel)) { // Likely a malformed query eg, select hash(distinct c1) from t1; throw new OptiqSemanticException("Distinct without an aggreggation."); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java index 364cad9..ea12990 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java @@ -150,6 +150,7 @@ private void runCycleAnalysisForPartitionPruning(OptimizeTezProcContext procCtx, LOG.info("Found cycle in operator plan..."); cycleFree = false; removeEventOperator(component); + break; } } LOG.info("Cycle free: " + cycleFree); @@ -227,7 +228,7 @@ private void connect(Operator o, AtomicInteger index, Stack> node for (Operator child : children) { if (!indexes.containsKey(child)) { connect(child, index, nodes, indexes, lowLinks, components); - lowLinks.put(child, Math.min(lowLinks.get(o), lowLinks.get(child))); + lowLinks.put(o, Math.min(lowLinks.get(o), lowLinks.get(child))); } else if (nodes.contains(child)) { lowLinks.put(o, Math.min(lowLinks.get(o), indexes.get(child))); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java index e065983..5decffb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.ql.parse; import java.sql.Date; +import java.sql.Timestamp; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -73,6 +74,8 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; +import com.google.common.collect.Lists; + /** * The Factory for creating typecheck processors. 
The typecheck processors are @@ -170,7 +173,8 @@ public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) tf.getStrExprProcessor()); opRules.put(new RuleRegExp("R4", HiveParser.KW_TRUE + "%|" + HiveParser.KW_FALSE + "%"), tf.getBoolExprProcessor()); - opRules.put(new RuleRegExp("R5", HiveParser.TOK_DATELITERAL + "%"), tf.getDateExprProcessor()); + opRules.put(new RuleRegExp("R5", HiveParser.TOK_DATELITERAL + "%|" + + HiveParser.TOK_TIMESTAMPLITERAL + "%"), tf.getDateTimeExprProcessor()); opRules.put(new RuleRegExp("R6", HiveParser.TOK_TABLE_OR_COL + "%"), tf.getColumnExprProcessor()); opRules.put(new RuleRegExp("R7", HiveParser.TOK_SUBQUERY_OP + "%"), @@ -182,9 +186,8 @@ public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) opRules, tcCtx); GraphWalker ogw = new DefaultGraphWalker(disp); - // Create a list of topop nodes - ArrayList topNodes = new ArrayList(); - topNodes.add(expr); + // Create a list of top nodes + ArrayList topNodes = Lists.newArrayList(expr); HashMap nodeOutputs = new LinkedHashMap(); ogw.startWalking(topNodes, nodeOutputs); @@ -420,7 +423,7 @@ public BoolExprProcessor getBoolExprProcessor() { /** * Processor for date constants. */ - public static class DateExprProcessor implements NodeProcessor { + public static class DateTimeExprProcessor implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, @@ -437,14 +440,24 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } ASTNode expr = (ASTNode) nd; + String timeString = BaseSemanticAnalyzer.stripQuotes(expr.getText()); // Get the string value and convert to a Date value. try { - String dateString = BaseSemanticAnalyzer.stripQuotes(expr.getText()); - Date date = Date.valueOf(dateString); - return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo, date); - } catch (IllegalArgumentException err) { - throw new SemanticException("Unable to convert date literal string to date value.", err); + // todo replace below with joda-time, which supports timezone + if (expr.getType() == HiveParser.TOK_DATELITERAL) { + PrimitiveTypeInfo typeInfo = TypeInfoFactory.dateTypeInfo; + return new ExprNodeConstantDesc(typeInfo, + Date.valueOf(timeString)); + } + if (expr.getType() == HiveParser.TOK_TIMESTAMPLITERAL) { + return new ExprNodeConstantDesc(TypeInfoFactory.timestampTypeInfo, + Timestamp.valueOf(timeString)); + } + throw new IllegalArgumentException("Invalid time literal type " + expr.getType()); + } catch (Exception err) { + throw new SemanticException( + "Unable to convert time literal '" + timeString + "' to time value.", err); } } } @@ -454,8 +467,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * * @return DateExprProcessor. 
*/ - public DateExprProcessor getDateExprProcessor() { - return new DateExprProcessor(); + public DateTimeExprProcessor getDateTimeExprProcessor() { + return new DateTimeExprProcessor(); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java index 70d9b7a..f5e8f62 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactory.java @@ -21,6 +21,8 @@ import java.util.HashSet; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; +import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; @@ -32,6 +34,8 @@ * tasks. Every method in this class may return null, indicating no task * needs to be executed or can throw a SemanticException. */ +@LimitedPrivate(value = { "Apache Hive, Apache Sentry (incubating)" }) +@Evolving public interface HiveAuthorizationTaskFactory { public Task createCreateRoleTask(ASTNode node, HashSet inputs, HashSet outputs) throws SemanticException; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java index 10ef7e5..dc34507 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/authorization/HiveAuthorizationTaskFactoryImpl.java @@ -238,7 +238,7 @@ private PrivilegeObjectDesc analyzePrivilegeObject(ASTNode ast, return subject; } - private PrivilegeObjectDesc parsePrivObject(ASTNode ast) throws SemanticException { + protected PrivilegeObjectDesc parsePrivObject(ASTNode ast) throws SemanticException { PrivilegeObjectDesc subject = new PrivilegeObjectDesc(); ASTNode child = (ASTNode) ast.getChild(0); ASTNode gchild = (ASTNode)child.getChild(0); @@ -246,6 +246,8 @@ private PrivilegeObjectDesc parsePrivObject(ASTNode ast) throws SemanticExceptio subject.setTable(true); String[] qualified = BaseSemanticAnalyzer.getQualifiedTableName(gchild); subject.setObject(BaseSemanticAnalyzer.getDotName(qualified)); + } else if (child.getType() == HiveParser.TOK_URI_TYPE || child.getType() == HiveParser.TOK_SERVER_TYPE) { + throw new SemanticException("Hive authorization does not support the URI or SERVER objects"); } else { subject.setTable(false); subject.setObject(BaseSemanticAnalyzer.unescapeIdentifier(gchild.getText())); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java index 9e72ccc..3dbdba6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MergeJoinWork.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.plan; import java.util.ArrayList; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java index b074ca9..d6067e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsAggregator.java @@ -134,6 +134,7 @@ public ResultSet run(PreparedStatement stmt) throws SQLException { } }; + fileID = JDBCStatsUtils.truncateRowId(fileID); String keyPrefix = Utilities.escapeSqlLike(fileID) + "%"; for (int failures = 0;; failures++) { try { @@ -147,7 +148,7 @@ public ResultSet run(PreparedStatement stmt) throws SQLException { if (result.next()) { retval = result.getLong(1); } else { - LOG.warn("Warning. Nothing published. Nothing aggregated."); + LOG.warn("Nothing published. Nothing aggregated."); return null; } return Long.toString(retval); @@ -217,6 +218,7 @@ public Void run(PreparedStatement stmt) throws SQLException { }; try { + rowID = JDBCStatsUtils.truncateRowId(rowID); String keyPrefix = Utilities.escapeSqlLike(rowID) + "%"; PreparedStatement delStmt = Utilities.prepareWithRetry(conn, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsPublisher.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsPublisher.java index 5e317ab..c1621e0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsPublisher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsPublisher.java @@ -139,7 +139,11 @@ public boolean publishStat(String fileID, Map stats) { + " stats: " + JDBCStatsUtils.getSupportedStatistics()); return false; } - LOG.info("Stats publishing for key " + fileID); + String rowId = JDBCStatsUtils.truncateRowId(fileID); + if (LOG.isInfoEnabled()) { + String truncateSuffix = (rowId != fileID) ? 
" (from " + fileID + ")" : ""; // object equality + LOG.info("Stats publishing for key " + rowId + truncateSuffix); + } Utilities.SQLCommand execUpdate = new Utilities.SQLCommand() { @Override @@ -153,7 +157,7 @@ public Void run(PreparedStatement stmt) throws SQLException { for (int failures = 0;; failures++) { try { - insStmt.setString(1, fileID); + insStmt.setString(1, rowId); for (int i = 0; i < JDBCStatsUtils.getSupportedStatistics().size(); i++) { insStmt.setString(i + 2, stats.get(supportedStatistics.get(i))); } @@ -172,10 +176,10 @@ public Void run(PreparedStatement stmt) throws SQLException { for (i = 0; i < JDBCStatsUtils.getSupportedStatistics().size(); i++) { updStmt.setString(i + 1, stats.get(supportedStatistics.get(i))); } - updStmt.setString(supportedStatistics.size() + 1, fileID); + updStmt.setString(supportedStatistics.size() + 1, rowId); updStmt.setString(supportedStatistics.size() + 2, stats.get(JDBCStatsUtils.getBasicStat())); - updStmt.setString(supportedStatistics.size() + 3, fileID); + updStmt.setString(supportedStatistics.size() + 3, rowId); Utilities.executeWithRetry(execUpdate, updStmt, waitWindow, maxRetries); return true; } catch (SQLRecoverableException ue) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsSetupConstants.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsSetupConstants.java index 70badf2..b999f8a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsSetupConstants.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsSetupConstants.java @@ -34,4 +34,7 @@ public static final String PART_STAT_RAW_DATA_SIZE_COLUMN_NAME = "RAW_DATA_SIZE"; + // 255 is an old value that we will keep for now; it can be increased to 4000; limits are + // MySQL - 65535, SQL Server - 8000, Oracle - 4000, Derby - 32762, Postgres - large. + public static final int ID_COLUMN_VARCHAR_SIZE = 255; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsUtils.java index 4625d27..383314b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/jdbc/JDBCStatsUtils.java @@ -24,6 +24,7 @@ import java.util.Map; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.util.hash.MurmurHash; public class JDBCStatsUtils { @@ -124,9 +125,10 @@ public static String getBasicStat() { * Prepares CREATE TABLE query */ public static String getCreate(String comment) { - String create = "CREATE TABLE /* " + comment + " */ " + JDBCStatsUtils.getStatTableName() + - " (" + getTimestampColumnName() + " TIMESTAMP DEFAULT CURRENT_TIMESTAMP, " + - JDBCStatsUtils.getIdColumnName() + " VARCHAR(255) PRIMARY KEY "; + String create = "CREATE TABLE /* " + comment + " */ " + JDBCStatsUtils.getStatTableName() + + " (" + getTimestampColumnName() + " TIMESTAMP DEFAULT CURRENT_TIMESTAMP, " + + JDBCStatsUtils.getIdColumnName() + " VARCHAR(" + + JDBCStatsSetupConstants.ID_COLUMN_VARCHAR_SIZE + ") PRIMARY KEY "; for (int i = 0; i < supportedStats.size(); i++) { create += ", " + getStatColumnName(supportedStats.get(i)) + " BIGINT "; } @@ -191,4 +193,13 @@ public static String getDeleteAggr(String rowID, String comment) { return delete; } + /** + * Make sure the row ID fits into the row ID column in the table. + * @param rowId Row ID. + * @return Resulting row ID truncated to correct length, if necessary. 
+ */ + public static String truncateRowId(String rowId) { + return (rowId.length() <= JDBCStatsSetupConstants.ID_COLUMN_VARCHAR_SIZE) + ? rowId : Integer.toHexString(MurmurHash.getInstance().hash(rowId.getBytes())); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index 146ebda..1aed097 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -69,7 +69,7 @@ public void run() { // This is solely for testing. It checks if the test has set the looped value to false, // and if so remembers that and then sets it to true at the end. We have to check here // first to make sure we go through a complete iteration of the loop before resetting it. - boolean setLooped = !looped.boolVal; + boolean setLooped = !looped.get(); // Make sure nothing escapes this run method and kills the metastore at large, // so wrap it in a big catch Throwable statement. try { @@ -137,16 +137,16 @@ public void run() { // Now, go back to bed until it's time to do this again long elapsedTime = System.currentTimeMillis() - startedAt; - if (elapsedTime >= cleanerCheckInterval || stop.boolVal) continue; + if (elapsedTime >= cleanerCheckInterval || stop.get()) continue; else Thread.sleep(cleanerCheckInterval - elapsedTime); } catch (Throwable t) { LOG.error("Caught an exception in the main loop of compactor cleaner, " + StringUtils.stringifyException(t)); } if (setLooped) { - looped.boolVal = true; + looped.set(true); } - } while (!stop.boolVal); + } while (!stop.get()); } private Set findRelatedLocks(CompactionInfo ci, ShowLocksResponse locksResponse) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java index 2bba731..7d097fd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java @@ -40,6 +40,7 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; /** * Superclass for all threads in the compactor. 
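// Condensed sketch of the compactor loop shape after the switch above from the metastore
// BooleanPointer to java.util.concurrent.atomic.AtomicBoolean, which gives the stop/looped
// flags proper cross-thread visibility. The class name is a stand-in; the real Cleaner,
// Initiator and Worker threads also resolve tables, check locks and handle retries.
import java.util.concurrent.atomic.AtomicBoolean;

class CompactorLoopSketch extends Thread {
  private final AtomicBoolean stop;
  private final AtomicBoolean looped;
  private final long checkIntervalMs;

  CompactorLoopSketch(AtomicBoolean stop, AtomicBoolean looped, long checkIntervalMs) {
    this.stop = stop;
    this.looped = looped;
    this.checkIntervalMs = checkIntervalMs;
  }

  @Override
  public void run() {
    do {
      boolean setLooped = !looped.get(); // tests reset this flag to observe one full pass
      long startedAt = System.currentTimeMillis();
      try {
        // ... perform one cleaning/compaction pass ...
        long elapsed = System.currentTimeMillis() - startedAt;
        if (elapsed < checkIntervalMs && !stop.get()) {
          Thread.sleep(checkIntervalMs - elapsed);
        }
      } catch (Throwable t) {
        // swallow so a single failure cannot kill the daemon thread
      }
      if (setLooped) {
        looped.set(true);
      }
    } while (!stop.get());
  }
}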
@@ -52,8 +53,8 @@ protected CompactionTxnHandler txnHandler; protected RawStore rs; protected int threadId; - protected BooleanPointer stop; - protected BooleanPointer looped; + protected AtomicBoolean stop; + protected AtomicBoolean looped; @Override public void setHiveConf(HiveConf conf) { @@ -67,7 +68,7 @@ public void setThreadId(int threadId) { } @Override - public void init(BooleanPointer stop, BooleanPointer looped) throws MetaException { + public void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException { this.stop = stop; this.looped = looped; setPriority(MIN_PRIORITY); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index 5545bf7..b8e7c70 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.txn.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnHandler; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -44,6 +45,7 @@ import java.util.List; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; /** * A class to initiate compactions. This will run in a separate thread. @@ -52,8 +54,6 @@ static final private String CLASS_NAME = Initiator.class.getName(); static final private Log LOG = LogFactory.getLog(CLASS_NAME); - static final private String NO_COMPACTION = "NO_AUTO_COMPACTION"; - private long checkInterval; @Override @@ -85,9 +85,8 @@ public void run() { try { Table t = resolveTable(ci); // check if no compaction set for this table - if (t.getParameters().get(NO_COMPACTION) != null) { - LOG.info("Table " + tableName(t) + " marked " + NO_COMPACTION + - " so we will not compact it."); + if (noAutoCompactSet(t)) { + LOG.info("Table " + tableName(t) + " marked true so we will not compact it."); continue; } @@ -126,10 +125,10 @@ public void run() { } long elapsedTime = System.currentTimeMillis() - startedAt; - if (elapsedTime >= checkInterval || stop.boolVal) continue; + if (elapsedTime >= checkInterval || stop.get()) continue; else Thread.sleep(checkInterval - elapsedTime); - } while (!stop.boolVal); + } while (!stop.get()); } catch (Throwable t) { LOG.error("Caught an exception in the main loop of compactor initiator, exiting " + StringUtils.stringifyException(t)); @@ -137,7 +136,7 @@ public void run() { } @Override - public void init(BooleanPointer stop, BooleanPointer looped) throws MetaException { + public void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException { super.init(stop, looped); checkInterval = conf.getTimeVar(HiveConf.ConfVars.HIVE_COMPACTOR_CHECK_INTERVAL, TimeUnit.MILLISECONDS) ; @@ -278,4 +277,16 @@ private void requestCompaction(CompactionInfo ci, String runAs, CompactionType t rqst.setRunas(runAs); txnHandler.compact(rqst); } + + // Because TABLE_NO_AUTO_COMPACT was originally assumed to be NO_AUTO_COMPACT and then was moved + // to no_auto_compact, we need to check it in both cases. 
+ private boolean noAutoCompactSet(Table t) { + String noAutoCompact = + t.getParameters().get(hive_metastoreConstants.TABLE_NO_AUTO_COMPACT); + if (noAutoCompact == null) { + noAutoCompact = + t.getParameters().get(hive_metastoreConstants.TABLE_NO_AUTO_COMPACT.toUpperCase()); + } + return noAutoCompact != null && noAutoCompact.equalsIgnoreCase("true"); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index e2388e7..c4a4c39 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -42,6 +42,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; /** * A class to do compactions. This will run in a separate thread. It will spin on the @@ -77,7 +78,7 @@ public void run() { do { CompactionInfo ci = txnHandler.findNextToCompact(name); - if (ci == null && !stop.boolVal) { + if (ci == null && !stop.get()) { try { Thread.sleep(SLEEP_TIME); continue; @@ -160,7 +161,7 @@ public Object run() throws Exception { ". Marking clean to avoid repeated failures, " + StringUtils.stringifyException(e)); txnHandler.markCleaned(ci); } - } while (!stop.boolVal); + } while (!stop.get()); } catch (Throwable t) { LOG.error("Caught an exception in the main loop of compactor worker " + name + ", exiting " + StringUtils.stringifyException(t)); @@ -168,7 +169,7 @@ public Object run() throws Exception { } @Override - public void init(BooleanPointer stop, BooleanPointer looped) throws MetaException { + public void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException { super.init(stop, looped); StringBuilder name = new StringBuilder(hostname()); diff --git a/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto b/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto index 31c49f1..cbfe57b 100644 --- a/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto +++ b/ql/src/protobuf/org/apache/hadoop/hive/ql/io/orc/orc_proto.proto @@ -191,8 +191,15 @@ message PostScript { optional uint64 footerLength = 1; optional CompressionKind compression = 2; optional uint64 compressionBlockSize = 3; + // the version of the file format + // [0, 11] = Hive 0.11 + // [0, 12] = Hive 0.12 repeated uint32 version = 4 [packed = true]; optional uint64 metadataLength = 5; + // Version of the writer: + // 0 (or missing) = original + // 1 = HIVE-8732 fixed + optional uint32 writerVersion = 6; // Leave this last in the record optional string magic = 8000; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java index 3fb770f..7bb2742 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.exec; import org.apache.commons.logging.Log; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestDecimalUtil.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestDecimalUtil.java index e0ca6fb..da9ebca 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestDecimalUtil.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestDecimalUtil.java @@ -38,8 +38,9 @@ public void testFloor() { DecimalUtil.floor(0, d1, dcv); Assert.assertEquals(0, expected1.compareTo(dcv.vector[0].getHiveDecimal())); + // As of HIVE-8745, these decimal values should be trimmed of trailing zeros. HiveDecimal d2 = HiveDecimal.create("23.00000"); - Assert.assertEquals(5, d2.scale()); + Assert.assertEquals(0, d2.scale()); HiveDecimal expected2 = HiveDecimal.create("23"); DecimalUtil.floor(0, d2, dcv); Assert.assertEquals(0, expected2.compareTo(dcv.vector[0].getHiveDecimal())); @@ -50,19 +51,19 @@ public void testFloor() { Assert.assertEquals(0, expected3.compareTo(dcv.vector[0].getHiveDecimal())); HiveDecimal d4 = HiveDecimal.create("-17.00000"); - Assert.assertEquals(5, d4.scale()); + Assert.assertEquals(0, d4.scale()); HiveDecimal expected4 = HiveDecimal.create("-17"); DecimalUtil.floor(0, d4, dcv); Assert.assertEquals(0, expected4.compareTo(dcv.vector[0].getHiveDecimal())); HiveDecimal d5 = HiveDecimal.create("-0.30000"); - Assert.assertEquals(5, d5.scale()); + Assert.assertEquals(1, d5.scale()); HiveDecimal expected5 = HiveDecimal.create("-1"); DecimalUtil.floor(0, d5, dcv); Assert.assertEquals(0, expected5.compareTo(dcv.vector[0].getHiveDecimal())); HiveDecimal d6 = HiveDecimal.create("0.30000"); - Assert.assertEquals(5, d6.scale()); + Assert.assertEquals(1, d6.scale()); HiveDecimal expected6 = HiveDecimal.create("0"); DecimalUtil.floor(0, d6, dcv); Assert.assertEquals(0, expected6.compareTo(dcv.vector[0].getHiveDecimal())); @@ -76,8 +77,9 @@ public void testCeiling() { DecimalUtil.ceiling(0, d1, dcv); Assert.assertEquals(0, expected1.compareTo(dcv.vector[0].getHiveDecimal())); + // As of HIVE-8745, these decimal values should be trimmed of trailing zeros. 
HiveDecimal d2 = HiveDecimal.create("23.00000"); - Assert.assertEquals(5, d2.scale()); + Assert.assertEquals(0, d2.scale()); HiveDecimal expected2 = HiveDecimal.create("23"); DecimalUtil.ceiling(0, d2, dcv); Assert.assertEquals(0, expected2.compareTo(dcv.vector[0].getHiveDecimal())); @@ -88,19 +90,19 @@ public void testCeiling() { Assert.assertEquals(0, expected3.compareTo(dcv.vector[0].getHiveDecimal())); HiveDecimal d4 = HiveDecimal.create("-17.00000"); - Assert.assertEquals(5, d4.scale()); + Assert.assertEquals(0, d4.scale()); HiveDecimal expected4 = HiveDecimal.create("-17"); DecimalUtil.ceiling(0, d4, dcv); Assert.assertEquals(0, expected4.compareTo(dcv.vector[0].getHiveDecimal())); HiveDecimal d5 = HiveDecimal.create("-0.30000"); - Assert.assertEquals(5, d5.scale()); + Assert.assertEquals(1, d5.scale()); HiveDecimal expected5 = HiveDecimal.create("0"); DecimalUtil.ceiling(0, d5, dcv); Assert.assertEquals(0, expected5.compareTo(dcv.vector[0].getHiveDecimal())); HiveDecimal d6 = HiveDecimal.create("0.30000"); - Assert.assertEquals(5, d6.scale()); + Assert.assertEquals(1, d6.scale()); HiveDecimal expected6 = HiveDecimal.create("1"); DecimalUtil.ceiling(0, d6, dcv); Assert.assertEquals(0, expected6.compareTo(dcv.vector[0].getHiveDecimal())); @@ -127,8 +129,9 @@ public void testRound() { DecimalUtil.round(0, d1, dcv); Assert.assertEquals(0, expected1.compareTo(dcv.vector[0].getHiveDecimal())); + // As of HIVE-8745, these decimal values should be trimmed of trailing zeros. HiveDecimal d2 = HiveDecimal.create("23.00000"); - Assert.assertEquals(5, d2.scale()); + Assert.assertEquals(0, d2.scale()); HiveDecimal expected2 = HiveDecimal.create("23"); DecimalUtil.round(0, d2, dcv); Assert.assertEquals(0, expected2.compareTo(dcv.vector[0].getHiveDecimal())); @@ -139,7 +142,7 @@ public void testRound() { Assert.assertEquals(0, expected3.compareTo(dcv.vector[0].getHiveDecimal())); HiveDecimal d4 = HiveDecimal.create("-17.00000"); - Assert.assertEquals(5, d4.scale()); + Assert.assertEquals(0, d4.scale()); HiveDecimal expected4 = HiveDecimal.create("-17"); DecimalUtil.round(0, d4, dcv); Assert.assertEquals(0, expected4.compareTo(dcv.vector[0].getHiveDecimal())); @@ -163,8 +166,9 @@ public void testRoundWithDigits() { DecimalUtil.round(0, d1, dcv); Assert.assertEquals(0, expected1.compareTo(dcv.vector[0].getHiveDecimal())); + // As of HIVE-8745, these decimal values should be trimmed of trailing zeros. HiveDecimal d2 = HiveDecimal.create("23.56700"); - Assert.assertEquals(5, d2.scale()); + Assert.assertEquals(3, d2.scale()); HiveDecimal expected2 = HiveDecimal.create("23.567"); DecimalUtil.round(0, d2, dcv); Assert.assertEquals(0, expected2.compareTo(dcv.vector[0].getHiveDecimal())); @@ -175,7 +179,7 @@ public void testRoundWithDigits() { Assert.assertEquals(0, expected3.compareTo(dcv.vector[0].getHiveDecimal())); HiveDecimal d4 = HiveDecimal.create("-17.23400"); - Assert.assertEquals(5, d4.scale()); + Assert.assertEquals(3, d4.scale()); HiveDecimal expected4 = HiveDecimal.create("-17.234"); DecimalUtil.round(0, d4, dcv); Assert.assertEquals(0, expected4.compareTo(dcv.vector[0].getHiveDecimal())); @@ -204,8 +208,9 @@ public void testNegate() { DecimalUtil.negate(0, d2, dcv); Assert.assertEquals(0, expected2.compareTo(dcv.vector[0].getHiveDecimal())); + // As of HIVE-8745, these decimal values should be trimmed of trailing zeros. 
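// Rough approximation (plain java.math.BigDecimal, not Hive's own implementation) of the
// HIVE-8745 normalization these updated assertions rely on: HiveDecimal.create now trims
// trailing zeros, so the scale reflects only significant fractional digits.
import java.math.BigDecimal;

class DecimalTrimSketch {
  static BigDecimal trim(BigDecimal d) {
    BigDecimal trimmed = d.stripTrailingZeros();
    // keep zero at scale 0 (older JDKs leave "0.00000" untouched when stripping)
    return trimmed.compareTo(BigDecimal.ZERO) == 0 ? BigDecimal.ZERO : trimmed;
  }

  public static void main(String[] args) {
    System.out.println(trim(new BigDecimal("23.00000")).scale()); // 0
    System.out.println(trim(new BigDecimal("-0.30000")).scale()); // 1
    System.out.println(trim(new BigDecimal("23.56700")));         // 23.567
  }
}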
HiveDecimal d3 = HiveDecimal.create("0.00000"); - Assert.assertEquals(5, d3.scale()); + Assert.assertEquals(0, d3.scale()); HiveDecimal expected3 = HiveDecimal.create("0"); DecimalUtil.negate(0, d3, dcv); Assert.assertEquals(0, expected3.compareTo(dcv.vector[0].getHiveDecimal())); @@ -223,7 +228,7 @@ public void testSign() { Assert.assertEquals(-1, lcv.vector[0]); HiveDecimal d3 = HiveDecimal.create("0.00000"); - Assert.assertEquals(5, d3.scale()); + Assert.assertEquals(0, d3.scale()); d3.setScale(5); DecimalUtil.sign(0, d3, lcv); Assert.assertEquals(0, lcv.vector[0]); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java index 0802e3b..c867a3c 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java @@ -323,18 +323,19 @@ public void testCastDecimalToString() { expr.evaluate(b); BytesColumnVector r = (BytesColumnVector) b.cols[1]; - byte[] v = toBytes("1.10"); + // As of HIVE-8745, these decimal values should be trimmed of trailing zeros. + byte[] v = toBytes("1.1"); assertTrue(((Integer) v.length).toString() + " " + r.length[0], v.length == r.length[0]); Assert.assertEquals(0, StringExpr.compare(v, 0, v.length, r.vector[0], r.start[0], r.length[0])); - v = toBytes("-2.20"); + v = toBytes("-2.2"); Assert.assertEquals(0, StringExpr.compare(v, 0, v.length, r.vector[1], r.start[1], r.length[1])); - v = toBytes("9999999999999999.00"); + v = toBytes("9999999999999999"); Assert.assertEquals(0, StringExpr.compare(v, 0, v.length, r.vector[2], r.start[2], r.length[2])); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestColumnStatistics.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestColumnStatistics.java new file mode 100644 index 0000000..dbd38c8 --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestColumnStatistics.java @@ -0,0 +1,176 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.io.orc; + +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.serde2.io.DateWritable; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.io.Text; +import org.junit.Test; + +import java.sql.Timestamp; + +import static junit.framework.Assert.assertEquals; + +/** + * Test ColumnStatisticsImpl for ORC. 
+ */ +public class TestColumnStatistics { + + @Test + public void testLongMerge() throws Exception { + ObjectInspector inspector = + PrimitiveObjectInspectorFactory.javaIntObjectInspector; + + ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(inspector); + ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(inspector); + stats1.updateInteger(10); + stats1.updateInteger(10); + stats2.updateInteger(1); + stats2.updateInteger(1000); + stats1.merge(stats2); + IntegerColumnStatistics typed = (IntegerColumnStatistics) stats1; + assertEquals(1, typed.getMinimum()); + assertEquals(1000, typed.getMaximum()); + stats1.reset(); + stats1.updateInteger(-10); + stats1.updateInteger(10000); + stats1.merge(stats2); + assertEquals(-10, typed.getMinimum()); + assertEquals(10000, typed.getMaximum()); + } + + @Test + public void testDoubleMerge() throws Exception { + ObjectInspector inspector = + PrimitiveObjectInspectorFactory.javaDoubleObjectInspector; + + ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(inspector); + ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(inspector); + stats1.updateDouble(10.0); + stats1.updateDouble(100.0); + stats2.updateDouble(1.0); + stats2.updateDouble(1000.0); + stats1.merge(stats2); + DoubleColumnStatistics typed = (DoubleColumnStatistics) stats1; + assertEquals(1.0, typed.getMinimum(), 0.001); + assertEquals(1000.0, typed.getMaximum(), 0.001); + stats1.reset(); + stats1.updateDouble(-10); + stats1.updateDouble(10000); + stats1.merge(stats2); + assertEquals(-10, typed.getMinimum(), 0.001); + assertEquals(10000, typed.getMaximum(), 0.001); + } + + + @Test + public void testStringMerge() throws Exception { + ObjectInspector inspector = + PrimitiveObjectInspectorFactory.javaStringObjectInspector; + + ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(inspector); + ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(inspector); + stats1.updateString(new Text("bob")); + stats1.updateString(new Text("david")); + stats1.updateString(new Text("charles")); + stats2.updateString(new Text("anne")); + stats2.updateString(new Text("erin")); + stats1.merge(stats2); + StringColumnStatistics typed = (StringColumnStatistics) stats1; + assertEquals("anne", typed.getMinimum()); + assertEquals("erin", typed.getMaximum()); + stats1.reset(); + stats1.updateString(new Text("aaa")); + stats1.updateString(new Text("zzz")); + stats1.merge(stats2); + assertEquals("aaa", typed.getMinimum()); + assertEquals("zzz", typed.getMaximum()); + } + + @Test + public void testDateMerge() throws Exception { + ObjectInspector inspector = + PrimitiveObjectInspectorFactory.javaDateObjectInspector; + + ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(inspector); + ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(inspector); + stats1.updateDate(new DateWritable(1000)); + stats1.updateDate(new DateWritable(100)); + stats2.updateDate(new DateWritable(10)); + stats2.updateDate(new DateWritable(2000)); + stats1.merge(stats2); + DateColumnStatistics typed = (DateColumnStatistics) stats1; + assertEquals(new DateWritable(10), typed.getMinimum()); + assertEquals(new DateWritable(2000), typed.getMaximum()); + stats1.reset(); + stats1.updateDate(new DateWritable(-10)); + stats1.updateDate(new DateWritable(10000)); + stats1.merge(stats2); + assertEquals(-10, typed.getMinimum().getDays()); + assertEquals(10000, typed.getMaximum().getDays()); + } + + @Test + public void testTimestampMerge() throws Exception { + ObjectInspector inspector = + 
PrimitiveObjectInspectorFactory.javaTimestampObjectInspector; + + ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(inspector); + ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(inspector); + stats1.updateTimestamp(new Timestamp(10)); + stats1.updateTimestamp(new Timestamp(100)); + stats2.updateTimestamp(new Timestamp(1)); + stats2.updateTimestamp(new Timestamp(1000)); + stats1.merge(stats2); + TimestampColumnStatistics typed = (TimestampColumnStatistics) stats1; + assertEquals(1, typed.getMinimum().getTime()); + assertEquals(1000, typed.getMaximum().getTime()); + stats1.reset(); + stats1.updateTimestamp(new Timestamp(-10)); + stats1.updateTimestamp(new Timestamp(10000)); + stats1.merge(stats2); + assertEquals(-10, typed.getMinimum().getTime()); + assertEquals(10000, typed.getMaximum().getTime()); + } + + @Test + public void testDecimalMerge() throws Exception { + ObjectInspector inspector = + PrimitiveObjectInspectorFactory.javaHiveDecimalObjectInspector; + + ColumnStatisticsImpl stats1 = ColumnStatisticsImpl.create(inspector); + ColumnStatisticsImpl stats2 = ColumnStatisticsImpl.create(inspector); + stats1.updateDecimal(HiveDecimal.create(10)); + stats1.updateDecimal(HiveDecimal.create(100)); + stats2.updateDecimal(HiveDecimal.create(1)); + stats2.updateDecimal(HiveDecimal.create(1000)); + stats1.merge(stats2); + DecimalColumnStatistics typed = (DecimalColumnStatistics) stats1; + assertEquals(1, typed.getMinimum().longValue()); + assertEquals(1000, typed.getMaximum().longValue()); + stats1.reset(); + stats1.updateDecimal(HiveDecimal.create(-10)); + stats1.updateDecimal(HiveDecimal.create(10000)); + stats1.merge(stats2); + assertEquals(-10, typed.getMinimum().longValue()); + assertEquals(10000, typed.getMaximum().longValue()); + } +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 697f59b..d1acd88 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -21,27 +21,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import java.io.DataInput; -import java.io.DataOutput; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.sql.Date; -import java.sql.Timestamp; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.TimeZone; -import java.util.TreeSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.FSDataInputStream; @@ -66,9 +45,9 @@ import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.io.InputFormatChecker; -import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory; import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; +import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.TableDesc; @@ -104,6 +83,27 @@ import 
org.junit.Test; import org.junit.rules.TestName; +import java.io.DataInput; +import java.io.DataOutput; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.sql.Date; +import java.sql.Timestamp; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.TimeZone; +import java.util.TreeSet; + public class TestInputOutputFormat { Path workDir = new Path(System.getProperty("test.tmp.dir","target/tmp")); @@ -1032,6 +1032,24 @@ public void testInOutFormat() throws Exception { reader.close(); } + static class SimpleRow implements Writable { + Text z; + + public SimpleRow(Text t) { + this.z = t; + } + + @Override + public void write(DataOutput dataOutput) throws IOException { + throw new UnsupportedOperationException("unsupported"); + } + + @Override + public void readFields(DataInput dataInput) throws IOException { + throw new UnsupportedOperationException("unsupported"); + } + } + static class NestedRow implements Writable { int z; MyRow r; @@ -1620,14 +1638,14 @@ public void testCombinationInputFormatWithAcid() throws Exception { assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00000", split.getPath().toString()); assertEquals(0, split.getStart()); - assertEquals(580, split.getLength()); + assertEquals(582, split.getLength()); split = (HiveInputFormat.HiveInputSplit) splits[1]; assertEquals("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat", split.inputFormatClassName()); assertEquals("mock:/combinationAcid/p=0/base_0000010/bucket_00001", split.getPath().toString()); assertEquals(0, split.getStart()); - assertEquals(601, split.getLength()); + assertEquals(603, split.getLength()); CombineHiveInputFormat.CombineHiveInputSplit combineSplit = (CombineHiveInputFormat.CombineHiveInputSplit) splits[2]; assertEquals(BUCKETS, combineSplit.getNumPaths()); @@ -1635,7 +1653,7 @@ public void testCombinationInputFormatWithAcid() throws Exception { assertEquals("mock:/combinationAcid/p=1/00000" + bucket + "_0", combineSplit.getPath(bucket).toString()); assertEquals(0, combineSplit.getOffset(bucket)); - assertEquals(225, combineSplit.getLength(bucket)); + assertEquals(227, combineSplit.getLength(bucket)); } String[] hosts = combineSplit.getLocations(); assertEquals(2, hosts.length); @@ -1685,4 +1703,89 @@ public void testSetSearchArgument() throws Exception { assertEquals("cost", leaves.get(0).getColumnName()); assertEquals(PredicateLeaf.Operator.IS_NULL, leaves.get(0).getOperator()); } + + @Test + @SuppressWarnings("unchecked,deprecation") + public void testSplitElimination() throws Exception { + Properties properties = new Properties(); + StructObjectInspector inspector; + synchronized (TestOrcFile.class) { + inspector = (StructObjectInspector) + ObjectInspectorFactory.getReflectionObjectInspector(NestedRow.class, + ObjectInspectorFactory.ObjectInspectorOptions.JAVA); + } + SerDe serde = new OrcSerde(); + OutputFormat outFormat = new OrcOutputFormat(); + conf.setInt("mapred.max.split.size", 50); + RecordWriter writer = + outFormat.getRecordWriter(fs, conf, testFilePath.toString(), + Reporter.NULL); + writer.write(NullWritable.get(), + serde.serialize(new NestedRow(1,2,3), inspector)); + writer.write(NullWritable.get(), + serde.serialize(new NestedRow(4,5,6), inspector)); + 
writer.write(NullWritable.get(), + serde.serialize(new NestedRow(7,8,9), inspector)); + writer.close(Reporter.NULL); + serde = new OrcSerde(); + SearchArgument sarg = + SearchArgumentFactory.newBuilder() + .startAnd() + .lessThan("z", new Integer(0)) + .end() + .build(); + conf.set("sarg.pushdown", sarg.toKryo()); + conf.set("hive.io.file.readcolumn.names", "z,r"); + properties.setProperty("columns", "z,r"); + properties.setProperty("columns.types", "int:struct"); + SerDeUtils.initializeSerDe(serde, conf, properties, null); + inspector = (StructObjectInspector) serde.getObjectInspector(); + InputFormat in = new OrcInputFormat(); + FileInputFormat.setInputPaths(conf, testFilePath.toString()); + InputSplit[] splits = in.getSplits(conf, 1); + assertEquals(0, splits.length); + } + + @Test + @SuppressWarnings("unchecked,deprecation") + public void testSplitEliminationNullStats() throws Exception { + Properties properties = new Properties(); + StructObjectInspector inspector; + synchronized (TestOrcFile.class) { + inspector = (StructObjectInspector) + ObjectInspectorFactory.getReflectionObjectInspector(SimpleRow.class, + ObjectInspectorFactory.ObjectInspectorOptions.JAVA); + } + SerDe serde = new OrcSerde(); + OutputFormat outFormat = new OrcOutputFormat(); + conf.setInt("mapred.max.split.size", 50); + RecordWriter writer = + outFormat.getRecordWriter(fs, conf, testFilePath.toString(), + Reporter.NULL); + writer.write(NullWritable.get(), + serde.serialize(new SimpleRow(null), inspector)); + writer.write(NullWritable.get(), + serde.serialize(new SimpleRow(null), inspector)); + writer.write(NullWritable.get(), + serde.serialize(new SimpleRow(null), inspector)); + writer.close(Reporter.NULL); + serde = new OrcSerde(); + SearchArgument sarg = + SearchArgumentFactory.newBuilder() + .startAnd() + .lessThan("z", new String("foo")) + .end() + .build(); + conf.set("sarg.pushdown", sarg.toKryo()); + conf.set("hive.io.file.readcolumn.names", "z"); + properties.setProperty("columns", "z"); + properties.setProperty("columns.types", "string"); + SerDeUtils.initializeSerDe(serde, conf, properties, null); + inspector = (StructObjectInspector) serde.getObjectInspector(); + InputFormat in = new OrcInputFormat(); + FileInputFormat.setInputPaths(conf, testFilePath.toString()); + InputSplit[] splits = in.getSplits(conf, 1); + assertEquals(0, splits.length); + } + } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/primitive/TestParquetByteInspector.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/primitive/TestParquetByteInspector.java index f1ffca6..7d1b9a5 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/primitive/TestParquetByteInspector.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/primitive/TestParquetByteInspector.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.io.parquet.serde.primitive; import static org.junit.Assert.assertEquals; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/primitive/TestParquetShortInspector.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/primitive/TestParquetShortInspector.java index 6538d35..f5803f2 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/primitive/TestParquetShortInspector.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/serde/primitive/TestParquetShortInspector.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.io.parquet.serde.primitive; import static org.junit.Assert.assertEquals; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java b/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java index 831ef8c..c91644c 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java @@ -2830,7 +2830,7 @@ public void testBuilderComplexTypes() throws Exception { .build(); assertEquals("leaf-0 = (LESS_THAN x 1970-01-11)\n" + "leaf-1 = (LESS_THAN_EQUALS y hi)\n" + - "leaf-2 = (EQUALS z 1.0)\n" + + "leaf-2 = (EQUALS z 1)\n" + "expr = (and leaf-0 leaf-1 leaf-2)", sarg.toString()); sarg = SearchArgumentFactory.newBuilder() diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java index 87ef193..5f0659b 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.exec.ExplainTask; @@ -267,7 +268,7 @@ private ReturnInfo parseAndAnalyze(String query, String testName) // I have to create the tables here (rather than in setup()) because I need the Hive // connection, which is conviently created by the semantic analyzer. 
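// The table property key in the next hunk moves from SemanticAnalyzer.ACID_TABLE_PROPERTY
// to hive_metastoreConstants.TABLE_IS_TRANSACTIONAL; both are assumed here to resolve to
// the same "transactional" = "true" flag that the new ACID .q tests in this patch set via
// TBLPROPERTIES ("transactional"="true").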
Map params = new HashMap(1); - params.put(SemanticAnalyzer.ACID_TABLE_PROPERTY, "true"); + params.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true"); db.createTable("T", Arrays.asList("a", "b"), null, OrcInputFormat.class, OrcOutputFormat.class, 2, Arrays.asList("a"), params); db.createTable("U", Arrays.asList("a", "b"), Arrays.asList("ds"), OrcInputFormat.class, diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java index e49ba05..038e5fd 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/authorization/TestHiveAuthorizationTaskFactory.java @@ -22,18 +22,23 @@ import junit.framework.Assert; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ASTNode; import org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer; +import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.ParseDriver; +import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.GrantDesc; import org.apache.hadoop.hive.ql.plan.GrantRevokeRoleDDL; import org.apache.hadoop.hive.ql.plan.PrincipalDesc; import org.apache.hadoop.hive.ql.plan.PrivilegeDesc; +import org.apache.hadoop.hive.ql.plan.PrivilegeObjectDesc; import org.apache.hadoop.hive.ql.plan.RevokeDesc; import org.apache.hadoop.hive.ql.plan.RoleDDLDesc; import org.apache.hadoop.hive.ql.plan.RoleDDLDesc.RoleOperation; @@ -47,6 +52,33 @@ public class TestHiveAuthorizationTaskFactory { + public static class DummyHiveAuthorizationTaskFactoryImpl extends HiveAuthorizationTaskFactoryImpl { + + static String uriPath = ""; + static String serverName = ""; + + public DummyHiveAuthorizationTaskFactoryImpl(HiveConf conf, Hive db) { + super(conf, db); + } + + @Override + protected PrivilegeObjectDesc parsePrivObject(ASTNode ast) throws SemanticException { + ASTNode child = (ASTNode) ast.getChild(0); + ASTNode gchild = (ASTNode)child.getChild(0); + if (child.getType() == HiveParser.TOK_URI_TYPE) { + uriPath = gchild.getText().replaceAll("'", "").replaceAll("\"", ""); + } else if (child.getType() == HiveParser.TOK_SERVER_TYPE) { + serverName = gchild.getText(); + } + return super.parsePrivObject(ast); + } + + public static void reset() { + uriPath = ""; + serverName = ""; + } + } + private static final String SELECT = "SELECT"; private static final String DB = "default"; private static final String TABLE = "table1"; @@ -67,6 +99,8 @@ @Before public void setup() throws Exception { conf = new HiveConf(); + conf.setVar(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY, + TestHiveAuthorizationTaskFactory.DummyHiveAuthorizationTaskFactoryImpl.class.getName()); db = Mockito.mock(Hive.class); table = new Table(DB, TABLE); partition = new Partition(table); @@ -81,6 +115,7 @@ public void setup() throws Exception { HadoopDefaultAuthenticator auth = new HadoopDefaultAuthenticator(); auth.setConf(conf); currentUser = auth.getUserName(); + DummyHiveAuthorizationTaskFactoryImpl.reset(); } /** @@ 
-414,6 +449,34 @@ public void testShowGrantGroupOnTable() throws Exception { Assert.assertTrue("Expected table", grantDesc.getHiveObj().getTable()); } + /** + * GRANT ALL ON URI + */ + @Test + public void testGrantUri() throws Exception { + String uriPath = "/tmp"; + try { + analyze("GRANT ALL ON URI '" + uriPath + "' TO USER user2"); + Assert.fail("Grant on URI should fail"); + } catch (SemanticException e) { + Assert.assertEquals(uriPath, DummyHiveAuthorizationTaskFactoryImpl.uriPath); + } + } + + /** + * GRANT ALL ON SERVER + */ + @Test + public void testGrantServer() throws Exception { + String serverName = "foo"; + try { + analyze("GRANT ALL ON SERVER " + serverName + " TO USER user2"); + Assert.fail("Grant on Server should fail"); + } catch (SemanticException e) { + Assert.assertEquals(serverName, DummyHiveAuthorizationTaskFactoryImpl.serverName); + } + } + private DDLWork analyze(String command) throws Exception { return AuthorizationTestUtil.analyze(command, conf, db); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index 9ebdfd3..d68e431 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -52,6 +52,7 @@ import java.util.Map; import java.util.Properties; import java.util.Stack; +import java.util.concurrent.atomic.AtomicBoolean; /** * Super class for all of the compactor test modules. @@ -65,7 +66,7 @@ protected long sleepTime = 1000; protected HiveConf conf; - private final MetaStoreThread.BooleanPointer stop = new MetaStoreThread.BooleanPointer(); + private final AtomicBoolean stop = new AtomicBoolean(); private final File tmpdir; protected CompactorTest() throws Exception { @@ -92,7 +93,7 @@ protected void startCleaner() throws Exception { startThread('c', true); } - protected void startCleaner(MetaStoreThread.BooleanPointer looped) throws Exception { + protected void startCleaner(AtomicBoolean looped) throws Exception { startThread('c', false, looped); } @@ -190,7 +191,7 @@ protected void burnThroughTransactions(int num) throws MetaException, NoSuchTxnE } protected void stopThread() { - stop.boolVal = true; + stop.set(true); } private StorageDescriptor newStorageDescriptor(String location, List sortCols) { @@ -218,10 +219,10 @@ private StorageDescriptor newStorageDescriptor(String location, List sort // I can't do this with @Before because I want to be able to control when the thead starts private void startThread(char type, boolean stopAfterOne) throws Exception { - startThread(type, stopAfterOne, new MetaStoreThread.BooleanPointer()); + startThread(type, stopAfterOne, new AtomicBoolean()); } - private void startThread(char type, boolean stopAfterOne, MetaStoreThread.BooleanPointer looped) + private void startThread(char type, boolean stopAfterOne, AtomicBoolean looped) throws Exception { TxnDbUtil.setConfValues(conf); CompactorThread t = null; @@ -233,7 +234,7 @@ private void startThread(char type, boolean stopAfterOne, MetaStoreThread.Boolea } t.setThreadId((int) t.getId()); t.setHiveConf(conf); - stop.boolVal = stopAfterOne; + stop.set(stopAfterOne); t.init(stop, looped); if (stopAfterOne) t.run(); else t.start(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java index b63ad66..7687851 100644 --- 
a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java @@ -30,6 +30,7 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; /** * Tests for the compactor Cleaner thread @@ -285,12 +286,12 @@ public void notBlockedBySubsequentLock() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); LockResponse res = txnHandler.lock(req); - MetaStoreThread.BooleanPointer looped = new MetaStoreThread.BooleanPointer(); - looped.boolVal = false; + AtomicBoolean looped = new AtomicBoolean(); + looped.set(false); startCleaner(looped); // Make sure the compactor has a chance to run once - while (!looped.boolVal) { + while (!looped.get()) { Thread.currentThread().sleep(100); } @@ -310,9 +311,9 @@ public void notBlockedBySubsequentLock() throws Exception { // Unlock the previous lock txnHandler.unlock(new UnlockRequest(res.getLockid())); - looped.boolVal = false; + looped.set(false); - while (!looped.boolVal) { + while (!looped.get()) { Thread.currentThread().sleep(100); } stopThread(); @@ -356,12 +357,12 @@ public void partitionNotBlockedBySubsequentLock() throws Exception { LockRequest req = new LockRequest(components, "me", "localhost"); LockResponse res = txnHandler.lock(req); - MetaStoreThread.BooleanPointer looped = new MetaStoreThread.BooleanPointer(); - looped.boolVal = false; + AtomicBoolean looped = new AtomicBoolean(); + looped.set(false); startCleaner(looped); // Make sure the compactor has a chance to run once - while (!looped.boolVal) { + while (!looped.get()) { Thread.currentThread().sleep(100); } @@ -383,9 +384,9 @@ public void partitionNotBlockedBySubsequentLock() throws Exception { // Unlock the previous lock txnHandler.unlock(new UnlockRequest(res.getLockid())); - looped.boolVal = false; + looped.set(false); - while (!looped.boolVal) { + while (!looped.get()) { Thread.currentThread().sleep(100); } stopThread(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java index 7d88c1a..b27316d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java @@ -243,6 +243,32 @@ public void noCompactWhenNoCompactSet() throws Exception { } @Test + public void noCompactWhenNoCompactSetLowerCase() throws Exception { + Map parameters = new HashMap(1); + parameters.put("no_auto_compaction", "true"); + Table t = newTable("default", "ncwncs", false, parameters); + + HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD, 10); + + for (int i = 0; i < 11; i++) { + long txnid = openTxn(); + LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default"); + comp.setTablename("ncwncs"); + List components = new ArrayList(1); + components.add(comp); + LockRequest req = new LockRequest(components, "me", "localhost"); + req.setTxnid(txnid); + LockResponse res = txnHandler.lock(req); + txnHandler.abortTxn(new AbortTxnRequest(txnid)); + } + + startInitiator(); + + ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest()); + Assert.assertEquals(0, rsp.getCompactsSize()); + } + + @Test public void noCompactWhenCompactAlreadyScheduled() throws Exception { Table t = newTable("default", "ncwcas", false); diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java index ce578a0..4c5b3a5 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java @@ -187,7 +187,7 @@ public void testDecimalDivideDecimal() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.getDecimalTypeInfo(11, 7), oi.getTypeInfo()); HiveDecimalWritable res = (HiveDecimalWritable) udf.evaluate(args); - Assert.assertEquals(HiveDecimal.create("0.0617100"), res.getHiveDecimal()); + Assert.assertEquals(HiveDecimal.create("0.06171"), res.getHiveDecimal()); } @Test diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNumeric.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNumeric.java index 75f2b3a..9a56ac1 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNumeric.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPNumeric.java @@ -1,3 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.ql.udf.generic; import org.apache.hadoop.hive.conf.HiveConf; diff --git a/ql/src/test/queries/clientnegative/authorization_grant_server.q b/ql/src/test/queries/clientnegative/authorization_grant_server.q new file mode 100644 index 0000000..3c2ef0f --- /dev/null +++ b/ql/src/test/queries/clientnegative/authorization_grant_server.q @@ -0,0 +1,8 @@ +set hive.test.authz.sstd.hs2.mode=true; +set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; +set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; + +set user.name=user1; + +-- grant insert on group should fail +GRANT ALL ON SERVER foo TO user user2; diff --git a/ql/src/test/queries/clientnegative/authorization_grant_uri.q b/ql/src/test/queries/clientnegative/authorization_grant_uri.q new file mode 100644 index 0000000..efe0492 --- /dev/null +++ b/ql/src/test/queries/clientnegative/authorization_grant_uri.q @@ -0,0 +1,9 @@ +set hive.test.authz.sstd.hs2.mode=true; +set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactoryForTest; +set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator; + +set user.name=user1; +-- current user has been set (comment line before the set cmd is resulting in parse error!!) 
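-- Expected to fail at analysis time: the SQL standard authorizer rejects URI and SERVER
-- grant targets with "SemanticException Hive authorization does not support the URI or
-- SERVER objects" (see the new authorization_grant_uri.q.out and
-- authorization_grant_server.q.out results later in this patch).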
+ +-- grant insert on group should fail +GRANT ALL ON URI '/tmp' TO user user2; diff --git a/ql/src/test/queries/clientnegative/gby_star.q b/ql/src/test/queries/clientnegative/gby_star.q new file mode 100644 index 0000000..477c77c --- /dev/null +++ b/ql/src/test/queries/clientnegative/gby_star.q @@ -0,0 +1 @@ +select *, count(value) from src group by key; diff --git a/ql/src/test/queries/clientnegative/gby_star2.q b/ql/src/test/queries/clientnegative/gby_star2.q new file mode 100644 index 0000000..7348e72 --- /dev/null +++ b/ql/src/test/queries/clientnegative/gby_star2.q @@ -0,0 +1 @@ +select *, sum(key) from src; diff --git a/ql/src/test/queries/clientnegative/timestamp_literal.q b/ql/src/test/queries/clientnegative/timestamp_literal.q new file mode 100644 index 0000000..1360dc9 --- /dev/null +++ b/ql/src/test/queries/clientnegative/timestamp_literal.q @@ -0,0 +1,2 @@ +-- TimeZone is not yet supported +SELECT TIMESTAMP '2012-12-29 20:01:00 +03:00'; diff --git a/ql/src/test/queries/clientpositive/acid_join.q b/ql/src/test/queries/clientpositive/acid_join.q new file mode 100644 index 0000000..2e6aeae --- /dev/null +++ b/ql/src/test/queries/clientpositive/acid_join.q @@ -0,0 +1,15 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; + +-- This test checks that a join with tables with two different buckets send the right bucket info to each table. +create table acidjoin1(name varchar(50), age int) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true"); +create table acidjoin2(name varchar(50), gpa decimal(3, 2)) clustered by (gpa) into 4 buckets stored as orc TBLPROPERTIES ("transactional"="true"); +create table acidjoin3(name varchar(50), age int, gpa decimal(3, 2)) clustered by (gpa) into 8 buckets stored as orc TBLPROPERTIES ("transactional"="true"); + +insert into table acidjoin1 values ('aaa', 35), ('bbb', 32), ('ccc', 32), ('ddd', 35), ('eee', 32); +insert into table acidjoin2 values ('aaa', 3.00), ('bbb', 3.01), ('ccc', 3.02), ('ddd', 3.03), ('eee', 3.04); + +insert into table acidjoin3 select a.name, age, gpa from acidjoin1 a join acidjoin2 b on (a.name = b.name); +select * from acidjoin3 order by name; + diff --git a/ql/src/test/queries/clientpositive/create_like.q b/ql/src/test/queries/clientpositive/create_like.q index b1785f5..69e47ab 100644 --- a/ql/src/test/queries/clientpositive/create_like.q +++ b/ql/src/test/queries/clientpositive/create_like.q @@ -64,3 +64,9 @@ DESCRIBE FORMATTED doctors; CREATE TABLE doctors2 like doctors; DESCRIBE FORMATTED doctors2; + +CREATE TABLE PropertiedParquetTable(a INT, b STRING) STORED AS PARQUET TBLPROPERTIES("parquet.compression"="LZO"); +CREATE TABLE LikePropertiedParquetTable LIKE PropertiedParquetTable; + +DESCRIBE FORMATTED LikePropertiedParquetTable; + diff --git a/ql/src/test/queries/clientpositive/ctas_colname.q b/ql/src/test/queries/clientpositive/ctas_colname.q index 890971e..d794e99 100644 --- a/ql/src/test/queries/clientpositive/ctas_colname.q +++ b/ql/src/test/queries/clientpositive/ctas_colname.q @@ -26,20 +26,20 @@ select * from x5; -- sub queries explain -create table x6 as select * from (select *, max(key) from src1) a; -create table x6 as select * from (select *, max(key) from src1) a; +create table x6 as select * from (select *, key + 1 from src1) a; +create table x6 as select * from (select *, key + 1 from src1) a; describe formatted x6; select * from x6; explain -create table x7 as select * from (select * 
from src group by key) a; -create table x7 as select * from (select * from src group by key) a; +create table x7 as select * from (select *, count(value) from src group by key, value) a; +create table x7 as select * from (select *, count(value) from src group by key, value) a; describe formatted x7; select * from x7; explain -create table x8 as select * from (select * from src group by key having key < 9) a; -create table x8 as select * from (select * from src group by key having key < 9) a; +create table x8 as select * from (select *, count(value) from src group by key, value having key < 9) a; +create table x8 as select * from (select *, count(value) from src group by key, value having key < 9) a; describe formatted x8; select * from x8; diff --git a/ql/src/test/queries/clientpositive/database_drop.q b/ql/src/test/queries/clientpositive/database_drop.q index 1371273..c8f6b0a 100644 --- a/ql/src/test/queries/clientpositive/database_drop.q +++ b/ql/src/test/queries/clientpositive/database_drop.q @@ -75,6 +75,12 @@ CREATE EXTERNAL TABLE extab1(id INT, name STRING) ROW FORMAT STORED AS TEXTFILE LOCATION 'file:${system:test.tmp.dir}/dbcascade/extab1'; +-- add a table, create index (give a name for index table) +CREATE TABLE temp_tbl3 (id INT, name STRING); +LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl3; +CREATE INDEX temp_tbl3_idx ON TABLE temp_tbl3(id) AS 'COMPACT' with DEFERRED REBUILD IN TABLE temp_tbl3_idx_tbl; +ALTER INDEX temp_tbl3_idx ON temp_tbl3 REBUILD; + -- drop the database with cascade DROP DATABASE db5 CASCADE; diff --git a/ql/src/test/queries/clientpositive/decimal_join2.q b/ql/src/test/queries/clientpositive/decimal_join2.q new file mode 100644 index 0000000..fb45367 --- /dev/null +++ b/ql/src/test/queries/clientpositive/decimal_join2.q @@ -0,0 +1,26 @@ +DROP TABLE IF EXISTS DECIMAL_3_txt; +DROP TABLE IF EXISTS DECIMAL_3; + +CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt; + +CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt; + +set hive.auto.convert.join=false; +EXPLAIN +SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value; + +SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value; + +set hive.auto.convert.join=true; +EXPLAIN +SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value; + +SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value; + +DROP TABLE DECIMAL_3_txt; +DROP TABLE DECIMAL_3; diff --git a/ql/src/test/queries/clientpositive/gby_star.q b/ql/src/test/queries/clientpositive/gby_star.q new file mode 100644 index 0000000..2f7952c --- /dev/null +++ b/ql/src/test/queries/clientpositive/gby_star.q @@ -0,0 +1,17 @@ +explain +select *, sum(key) from src group by key, value limit 10; +select *, sum(key) from src group by key, value limit 10; + +explain +select *, sum(key) from src where key < 100 group by key, value limit 10; +select *, sum(key) from src where key < 100 group by key, value limit 10; + +explain +select *, sum(key) from (select key from src where key < 100) a group by key limit 10; +select *, sum(key) from (select key from src where key < 100) a group by key limit 10; + +explain +select a.*, sum(src.key) from (select key from src where key < 100) a +inner join src on a.key = 
src.key group by a.key limit 10; +select a.*, sum(src.key) from (select key from src where key < 100) a +inner join src on a.key = src.key group by a.key limit 10; diff --git a/ql/src/test/queries/clientpositive/index_skewtable.q b/ql/src/test/queries/clientpositive/index_skewtable.q new file mode 100644 index 0000000..fdf65cd --- /dev/null +++ b/ql/src/test/queries/clientpositive/index_skewtable.q @@ -0,0 +1,22 @@ +-- Test creating an index on skewed table + +-- Create a skew table +CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE kv; + +-- Create and build an index +CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD; +DESCRIBE FORMATTED default__kv_kv_index__; +ALTER INDEX kv_index ON kv REBUILD; + +SET hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; +SET hive.optimize.index.filter=true; +SET hive.optimize.index.filter.compact.minsize=0; + +-- Run a query that uses the index +EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value; +SELECT * FROM kv WHERE value > '15' ORDER BY value; + +DROP INDEX kv_index ON kv; +DROP TABLE kv; diff --git a/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q b/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q new file mode 100644 index 0000000..1bb2ee3 --- /dev/null +++ b/ql/src/test/queries/clientpositive/insert_nonacid_from_acid.q @@ -0,0 +1,13 @@ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +set hive.enforce.bucketing=true; + +-- This test checks that selecting from an acid table and inserting into a non-acid table works. +create table sample_06(name varchar(50), age int, gpa decimal(3, 2)) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true"); +insert into table sample_06 values ('aaa', 35, 3.00), ('bbb', 32, 3.00), ('ccc', 32, 3.00), ('ddd', 35, 3.00), ('eee', 32, 3.00); +select * from sample_06 where gpa = 3.00; + +create table tab1 (name varchar(50), age int, gpa decimal(3, 2)); +insert into table tab1 select * from sample_06 where gpa = 3.00; +select * from tab1; + diff --git a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q index 59cc221..0348948 100644 --- a/ql/src/test/queries/clientpositive/orc_merge_incompat1.q +++ b/ql/src/test/queries/clientpositive/orc_merge_incompat1.q @@ -19,6 +19,7 @@ insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc set hive.exec.orc.write.format=0.11; insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; +insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13; -- 5 files total analyze table orc_merge5b compute statistics noscan; diff --git a/ql/src/test/queries/clientpositive/parquet_create.q b/ql/src/test/queries/clientpositive/parquet_create.q index 0b976bd..f2eea6e 100644 --- a/ql/src/test/queries/clientpositive/parquet_create.q +++ b/ql/src/test/queries/clientpositive/parquet_create.q @@ -28,7 +28,6 @@ SELECT * FROM parquet_create_staging; INSERT OVERWRITE TABLE parquet_create SELECT * FROM parquet_create_staging; -SELECT * FROM parquet_create group by id; SELECT id, count(0) FROM parquet_create group by id; SELECT str from parquet_create; SELECT mp 
from parquet_create; diff --git a/ql/src/test/queries/clientpositive/partition_multilevels.q b/ql/src/test/queries/clientpositive/partition_multilevels.q new file mode 100644 index 0000000..bde4c62 --- /dev/null +++ b/ql/src/test/queries/clientpositive/partition_multilevels.q @@ -0,0 +1,93 @@ +create table partition_test_multilevel (key string, value string) partitioned by (level1 string, level2 string, level3 string); + +insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='11') select key, value from srcpart tablesample (11 rows); +insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='22') select key, value from srcpart tablesample (12 rows); +insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='33') select key, value from srcpart tablesample (13 rows); +insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='44') select key, value from srcpart tablesample (14 rows); + +insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='11') select key, value from srcpart tablesample (15 rows); +insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='22') select key, value from srcpart tablesample (16 rows); +insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='33') select key, value from srcpart tablesample (17 rows); +insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='44') select key, value from srcpart tablesample (18 rows); + +insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='11') select key, value from srcpart tablesample (19 rows); +insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='22') select key, value from srcpart tablesample (20 rows); +insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='33') select key, value from srcpart tablesample (21 rows); +insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='44') select key, value from srcpart tablesample (22 rows); + +insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='11') select key, value from srcpart tablesample (11 rows); +insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='22') select key, value from srcpart tablesample (12 rows); +insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='33') select key, value from srcpart tablesample (13 rows); +insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='44') select key, value from srcpart tablesample (14 rows); + +insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='11') select key, value from srcpart tablesample (15 rows); +insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='22') select key, value from srcpart tablesample (16 rows); +insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='33') select key, value from srcpart tablesample (17 rows); +insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='44') select key, value from srcpart tablesample (18 rows); + 
+insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='11') select key, value from srcpart tablesample (19 rows); +insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='22') select key, value from srcpart tablesample (20 rows); +insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='33') select key, value from srcpart tablesample (21 rows); +insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='44') select key, value from srcpart tablesample (22 rows); + +set metaconf:hive.metastore.try.direct.sql=false; + +-- beginning level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level1 >= '2222' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level1 !='2222' group by level1, level2, level3; + +-- middle level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 = '222' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level2 <= '222' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level2 != '222' group by level1, level2, level3; + +-- ending level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level3 = '22' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level3 >= '22' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level3 != '22' group by level1, level2, level3; + +-- two different levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 >= '222' and level3 = '33' group by level1, level2, level3; + +select level1, level2, level3, count(*) from partition_test_multilevel where level1 <= '1111' and level3 >= '33' group by level1, level2, level3; + + +-- all levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' and level2 >= '222' and level3 <= '33' group by level1, level2, level3; + +-- between +select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3; + +explain select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3; + +set metaconf:hive.metastore.try.direct.sql=true; + +-- beginning level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level1 >= '2222' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level1 !='2222' group by level1, level2, level3; + +-- middle level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 = '222' group by 
level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level2 <= '222' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level2 != '222' group by level1, level2, level3; + +-- ending level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level3 = '22' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level3 >= '22' group by level1, level2, level3; +select level1, level2, level3, count(*) from partition_test_multilevel where level3 != '22' group by level1, level2, level3; + +-- two different levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 >= '222' and level3 = '33' group by level1, level2, level3; + +select level1, level2, level3, count(*) from partition_test_multilevel where level1 <= '1111' and level3 >= '33' group by level1, level2, level3; + + +-- all levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' and level2 >= '222' and level3 <= '33' group by level1, level2, level3; + +-- between +select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3; + +explain select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3; diff --git a/ql/src/test/queries/clientpositive/partition_timestamp.q b/ql/src/test/queries/clientpositive/partition_timestamp.q new file mode 100644 index 0000000..aa1a0c0 --- /dev/null +++ b/ql/src/test/queries/clientpositive/partition_timestamp.q @@ -0,0 +1,57 @@ +drop table partition_timestamp_1; + +create table partition_timestamp_1 (key string, value string) partitioned by (dt timestamp, region string); + +insert overwrite table partition_timestamp_1 partition(dt='2000-01-01 01:00:00', region= '1') + select * from src tablesample (10 rows); +insert overwrite table partition_timestamp_1 partition(dt='2000-01-01 02:00:00', region= '2') + select * from src tablesample (5 rows); +insert overwrite table partition_timestamp_1 partition(dt='2001-01-01 01:00:00', region= '2020-20-20') + select * from src tablesample (5 rows); +insert overwrite table partition_timestamp_1 partition(dt='2001-01-01 02:00:00', region= '1') + select * from src tablesample (20 rows); +insert overwrite table partition_timestamp_1 partition(dt='2001-01-01 03:00:00', region= '10') + select * from src tablesample (11 rows); + +select distinct dt from partition_timestamp_1; +select * from partition_timestamp_1 where dt = '2000-01-01 01:00:00' and region = '2' order by key,value; + +-- 10 +select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00'; +-- 10. 
Also try with string value in predicate +select count(*) from partition_timestamp_1 where dt = '2000-01-01 01:00:00'; +-- 5 +select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 02:00:00' and region = '2'; +-- 11 +select count(*) from partition_timestamp_1 where dt = timestamp '2001-01-01 03:00:00' and region = '10'; +-- 30 +select count(*) from partition_timestamp_1 where region = '1'; +-- 0 +select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' and region = '3'; +-- 0 +select count(*) from partition_timestamp_1 where dt = timestamp '1999-01-01 01:00:00'; + +-- Try other comparison operations + +-- 20 +select count(*) from partition_timestamp_1 where dt > timestamp '2000-01-01 01:00:00' and region = '1'; +-- 10 +select count(*) from partition_timestamp_1 where dt < timestamp '2000-01-02 01:00:00' and region = '1'; +-- 20 +select count(*) from partition_timestamp_1 where dt >= timestamp '2000-01-02 01:00:00' and region = '1'; +-- 10 +select count(*) from partition_timestamp_1 where dt <= timestamp '2000-01-01 01:00:00' and region = '1'; +-- 20 +select count(*) from partition_timestamp_1 where dt <> timestamp '2000-01-01 01:00:00' and region = '1'; +-- 10 +select count(*) from partition_timestamp_1 where dt between timestamp '1999-12-30 12:00:00' and timestamp '2000-01-03 12:00:00' and region = '1'; + + +-- Try a string key with timestamp-like strings + +-- 5 +select count(*) from partition_timestamp_1 where region = '2020-20-20'; +-- 5 +select count(*) from partition_timestamp_1 where region > '2010-01-01'; + +drop table partition_timestamp_1; diff --git a/ql/src/test/queries/clientpositive/partition_timestamp2.q b/ql/src/test/queries/clientpositive/partition_timestamp2.q new file mode 100644 index 0000000..1f5ec26 --- /dev/null +++ b/ql/src/test/queries/clientpositive/partition_timestamp2.q @@ -0,0 +1,56 @@ +drop table partition_timestamp2_1; + +create table partition_timestamp2_1 (key string, value string) partitioned by (dt timestamp, region int); + +-- test timestamp literal syntax +from (select * from src tablesample (1 rows)) x +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) select * +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 01:00:00', region=1) select * +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '1999-01-01 00:00:00', region=2) select * +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '1999-01-01 01:00:00', region=2) select *; + +select distinct dt from partition_timestamp2_1; +select * from partition_timestamp2_1; + +-- insert overwrite +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) + select 'changed_key', 'changed_value' from src tablesample (2 rows); +select * from partition_timestamp2_1; + +-- truncate +truncate table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1); +select distinct dt from partition_timestamp2_1; +select * from partition_timestamp2_1; + +-- alter table add partition +alter table partition_timestamp2_1 add partition (dt=timestamp '1980-01-02 00:00:00', region=3); +select distinct dt from partition_timestamp2_1; +select * from partition_timestamp2_1; + +-- alter table drop +alter table partition_timestamp2_1 drop partition (dt=timestamp '1999-01-01 01:00:00', region=2); +select distinct dt from partition_timestamp2_1; +select * from partition_timestamp2_1; + +-- alter table set serde 
+alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) + set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe'; + +-- alter table set fileformat +alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) + set fileformat rcfile; +describe extended partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3); + +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) + select * from src tablesample (2 rows); +select * from partition_timestamp2_1 order by key,value,dt,region; + +-- alter table set location +alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) + set location "file:///tmp/partition_timestamp2_1"; +describe extended partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3); + +-- alter table touch +alter table partition_timestamp2_1 touch partition(dt=timestamp '1980-01-02 00:00:00', region=3); + +drop table partition_timestamp2_1; diff --git a/ql/src/test/queries/clientpositive/partition_type_in_plan.q b/ql/src/test/queries/clientpositive/partition_type_in_plan.q new file mode 100644 index 0000000..693dd24 --- /dev/null +++ b/ql/src/test/queries/clientpositive/partition_type_in_plan.q @@ -0,0 +1,15 @@ +-- Test partition column type is considered as the type given in table def +-- and not as 'string' +CREATE TABLE datePartTbl(col1 string) PARTITIONED BY (date_prt date); + +-- Add test partitions and some sample data +INSERT OVERWRITE TABLE datePartTbl PARTITION(date_prt='2014-08-09') + SELECT 'col1-2014-08-09' FROM src LIMIT 1; + +INSERT OVERWRITE TABLE datePartTbl PARTITION(date_prt='2014-08-10') + SELECT 'col1-2014-08-10' FROM src LIMIT 1; + +-- Query where 'date_prt' value is restricted to given values in IN operator. 
+SELECT * FROM datePartTbl WHERE date_prt IN (CAST('2014-08-09' AS DATE), CAST('2014-08-08' AS DATE)); + +DROP TABLE datePartTbl; diff --git a/ql/src/test/queries/clientpositive/timestamp_literal.q b/ql/src/test/queries/clientpositive/timestamp_literal.q new file mode 100644 index 0000000..2a7b91b --- /dev/null +++ b/ql/src/test/queries/clientpositive/timestamp_literal.q @@ -0,0 +1,12 @@ +explain +select timestamp '2011-01-01 01:01:01'; +select timestamp '2011-01-01 01:01:01'; + +explain +select '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100'; +select '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100'; + +explain +select 1 where timestamp '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100'; +select 1 where timestamp '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100'; + diff --git a/ql/src/test/resources/orc-file-dump-dictionary-threshold.out b/ql/src/test/resources/orc-file-dump-dictionary-threshold.out index 380f0e0..519fa07 100644 --- a/ql/src/test/resources/orc-file-dump-dictionary-threshold.out +++ b/ql/src/test/resources/orc-file-dump-dictionary-threshold.out @@ -1,4 +1,5 @@ Structure for TestFileDump.testDump.orc +File Version: 0.12 with HIVE_8732 Rows: 21000 Compression: ZLIB Compression size: 10000 @@ -182,6 +183,6 @@ Stripes: Row group index column 3: Entry 0: count: 1000 min: Darkness,-230-368-488-586-862-930-1686-2044-2636-2652-2872-3108-3162-3192-3404-3442-3508-3542-3550-3712-3980-4146-4204-4336-4390-4418-4424-4490-4512-4650-4768-4924-4950-5210-5524-5630-5678-5710-5758-5952-6238-6252-6300-6366-6668-6712-6926-6942-7100-7194-7802-8030-8452-8608-8640-8862-8868-9134-9234-9412-9602-9608-9642-9678-9740-9780-10426-10510-10514-10706-10814-10870-10942-11028-11244-11326-11462-11496-11656-11830-12022-12178-12418-12832-13304-13448-13590-13618-13908-14188-14246-14340-14364-14394-14762-14850-14964-15048-15494-15674-15726-16006-16056-16180-16304-16332-16452-16598-16730-16810-16994-17210-17268-17786-17962-18214-18444-18446-18724-18912-18952-19164-19348-19400-19546-19776-19896-20084 max: worst-54-290-346-648-908-996-1038-1080-1560-1584-1620-1744-1770-1798-1852-1966-2162-2244-2286-2296-2534-2660-3114-3676-3788-4068-4150-4706-4744-5350-5420-5582-5696-5726-6006-6020-6024-6098-6184-6568-6636-6802-6994-7004-7318-7498-7758-7780-7798-7920-7952-7960-7988-8232-8256-8390-8416-8478-8620-8840-8984-9038-9128-9236-9248-9344-9594-9650-9714-9928-9938-10178-10368-10414-10502-10732-10876-11008-11158-11410-11722-11836-11964-12054-12096-12126-12136-12202-12246-12298-12616-12774-12782-12790-12802-12976-13216-13246-13502-13766-14454-14974-15004-15124-15252-15294-15356-15530-15610-16316-16936-17024-17122-17214-17310-17528-17682-17742-17870-17878-18010-18410-18524-18788-19204-19254-19518-19596-19786-19874-19904-20390-20752-20936 positions: 0,0,0,0,0 -File length: 2033557 bytes +File length: 2033559 bytes Padding length: 0 bytes Padding ratio: 0% diff --git a/ql/src/test/resources/orc-file-dump.out b/ql/src/test/resources/orc-file-dump.out index d67b53a..d0cb1be 100644 --- a/ql/src/test/resources/orc-file-dump.out +++ b/ql/src/test/resources/orc-file-dump.out @@ -1,4 +1,5 @@ Structure for TestFileDump.testDump.orc +File Version: 0.12 with HIVE_8732 Rows: 21000 Compression: ZLIB Compression size: 10000 @@ -187,6 +188,6 @@ Stripes: Row group index column 3: Entry 0: count: 1000 min: Darkness, max: worst positions: 0,0,0 -File length: 270760 bytes +File length: 270762 bytes Padding length: 0 bytes Padding ratio: 0% diff --git 
a/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out b/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out index 633527d..1dbc3e2 100644 --- a/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out +++ b/ql/src/test/results/clientnegative/authorization_caseinsensitivity.q.out @@ -13,7 +13,6 @@ POSTHOOK: type: SHOW_ROLES admin public testrole - PREHOOK: query: drop role TESTROLE PREHOOK: type: DROPROLE POSTHOOK: query: drop role TESTROLE @@ -24,7 +23,6 @@ POSTHOOK: query: show roles POSTHOOK: type: SHOW_ROLES admin public - PREHOOK: query: create role TESTROLE PREHOOK: type: CREATEROLE POSTHOOK: query: create role TESTROLE @@ -36,7 +34,6 @@ POSTHOOK: type: SHOW_ROLES admin public testrole - PREHOOK: query: grant role testROLE to user hive_admin_user PREHOOK: type: GRANT_ROLE POSTHOOK: query: grant role testROLE to user hive_admin_user @@ -56,7 +53,6 @@ POSTHOOK: type: SHOW_ROLES admin public testrole - PREHOOK: query: create role TESTRoLE PREHOOK: type: CREATEROLE FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Role testrole already exists. diff --git a/ql/src/test/results/clientnegative/authorization_drop_db_cascade.q.out b/ql/src/test/results/clientnegative/authorization_drop_db_cascade.q.out index d267a89..a652b29 100644 --- a/ql/src/test/results/clientnegative/authorization_drop_db_cascade.q.out +++ b/ql/src/test/results/clientnegative/authorization_drop_db_cascade.q.out @@ -55,5 +55,4 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public - FAILED: HiveAccessControlException Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation DROPDATABASE [[OBJECT OWNERSHIP] on Object [type=TABLE_OR_VIEW, name=dba2.tab2]] diff --git a/ql/src/test/results/clientnegative/authorization_drop_db_empty.q.out b/ql/src/test/results/clientnegative/authorization_drop_db_empty.q.out index fcffd70..1144fac 100644 --- a/ql/src/test/results/clientnegative/authorization_drop_db_empty.q.out +++ b/ql/src/test/results/clientnegative/authorization_drop_db_empty.q.out @@ -21,7 +21,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: drop database dba1 PREHOOK: type: DROPDATABASE PREHOOK: Input: database:dba1 @@ -37,7 +36,6 @@ POSTHOOK: query: -- check if dropping db as another user fails show current roles POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: create database dba2 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:dba2 @@ -49,5 +47,4 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public - FAILED: HiveAccessControlException Permission denied: Principal [name=user2, type=USER] does not have following privileges for operation DROPDATABASE [[OBJECT OWNERSHIP] on Object [type=DATABASE, name=dba2]] diff --git a/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out index 05b4119..637167b 100644 --- a/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out +++ b/ql/src/test/results/clientnegative/authorization_drop_role_no_admin.q.out @@ -7,7 +7,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES admin - PREHOOK: query: create role r1 PREHOOK: type: CREATEROLE POSTHOOK: query: create role r1 @@ -21,7 +20,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show 
current roles POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: drop role r1 PREHOOK: type: DROPROLE FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current user : hive_admin_user is not allowed to drop role. User has to belong to ADMIN role and have it as current role, for this action. diff --git a/ql/src/test/results/clientnegative/authorization_grant_server.q.out b/ql/src/test/results/clientnegative/authorization_grant_server.q.out new file mode 100644 index 0000000..b7109a0 --- /dev/null +++ b/ql/src/test/results/clientnegative/authorization_grant_server.q.out @@ -0,0 +1 @@ +FAILED: SemanticException Hive authorization does not support the URI or SERVER objects diff --git a/ql/src/test/results/clientnegative/authorization_grant_uri.q.out b/ql/src/test/results/clientnegative/authorization_grant_uri.q.out new file mode 100644 index 0000000..b7109a0 --- /dev/null +++ b/ql/src/test/results/clientnegative/authorization_grant_uri.q.out @@ -0,0 +1 @@ +FAILED: SemanticException Hive authorization does not support the URI or SERVER objects diff --git a/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out b/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out index 7a9d382..1cbfb3e 100644 --- a/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out +++ b/ql/src/test/results/clientnegative/authorization_priv_current_role_neg.q.out @@ -42,7 +42,6 @@ show current roles POSTHOOK: type: SHOW_ROLES public role2 - PREHOOK: query: grant all on table tpriv_current_role to user user3 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@tpriv_current_role diff --git a/ql/src/test/results/clientnegative/authorization_role_case.q.out b/ql/src/test/results/clientnegative/authorization_role_case.q.out index e254783..4bc1f16 100644 --- a/ql/src/test/results/clientnegative/authorization_role_case.q.out +++ b/ql/src/test/results/clientnegative/authorization_role_case.q.out @@ -14,7 +14,6 @@ admin mixCaseRole1 mixCaseRole2 public - PREHOOK: query: create table t1(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default diff --git a/ql/src/test/results/clientnegative/authorization_rolehierarchy_privs.q.out b/ql/src/test/results/clientnegative/authorization_rolehierarchy_privs.q.out index 12ce96e..053d0c0 100644 --- a/ql/src/test/results/clientnegative/authorization_rolehierarchy_privs.q.out +++ b/ql/src/test/results/clientnegative/authorization_rolehierarchy_privs.q.out @@ -3,7 +3,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN @@ -64,7 +63,6 @@ public role1 role2 role3 - PREHOOK: query: select * from t1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 @@ -78,7 +76,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: grant select on t1 to role role2 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@t1 @@ -93,7 +90,6 @@ public role1 role2 role3 - PREHOOK: query: select * from t1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 @@ -111,7 +107,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES admin - PREHOOK: query: revoke select on table t1 from role role2 PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: default@t1 @@ -139,7 +134,6 @@ role1 role2 role3 role4 - PREHOOK: query: select * from t1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 @@ 
-153,7 +147,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN @@ -175,7 +168,6 @@ role1 role2 role3 role4 - PREHOOK: query: select * from t1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 @@ -189,7 +181,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN @@ -206,5 +197,4 @@ public role1 role2 role4 - FAILED: HiveAccessControlException Permission denied: Principal [name=user1, type=USER] does not have following privileges for operation QUERY [[SELECT] on Object [type=TABLE_OR_VIEW, name=default.t1]] diff --git a/ql/src/test/results/clientnegative/date_literal2.q.out b/ql/src/test/results/clientnegative/date_literal2.q.out index 82f6425..a34cac0 100644 --- a/ql/src/test/results/clientnegative/date_literal2.q.out +++ b/ql/src/test/results/clientnegative/date_literal2.q.out @@ -1 +1 @@ -FAILED: SemanticException Unable to convert date literal string to date value. +FAILED: SemanticException Unable to convert time literal '2001/01/01' to time value. diff --git a/ql/src/test/results/clientnegative/date_literal3.q.out b/ql/src/test/results/clientnegative/date_literal3.q.out index 82f6425..f51de7c 100644 --- a/ql/src/test/results/clientnegative/date_literal3.q.out +++ b/ql/src/test/results/clientnegative/date_literal3.q.out @@ -1 +1 @@ -FAILED: SemanticException Unable to convert date literal string to date value. +FAILED: SemanticException Unable to convert time literal '2001-01-32' to time value. diff --git a/ql/src/test/results/clientnegative/gby_star.q.out b/ql/src/test/results/clientnegative/gby_star.q.out new file mode 100644 index 0000000..7c1997e --- /dev/null +++ b/ql/src/test/results/clientnegative/gby_star.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10025]: Expression not in GROUP BY key value diff --git a/ql/src/test/results/clientnegative/gby_star2.q.out b/ql/src/test/results/clientnegative/gby_star2.q.out new file mode 100644 index 0000000..6e5ed65 --- /dev/null +++ b/ql/src/test/results/clientnegative/gby_star2.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10025]: Expression not in GROUP BY key key diff --git a/ql/src/test/results/clientnegative/illegal_partition_type4.q.out b/ql/src/test/results/clientnegative/illegal_partition_type4.q.out index e388086..861ee51 100644 --- a/ql/src/test/results/clientnegative/illegal_partition_type4.q.out +++ b/ql/src/test/results/clientnegative/illegal_partition_type4.q.out @@ -6,4 +6,4 @@ POSTHOOK: query: create table tab1(s string) PARTITIONED BY(dt date, st string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab1 -FAILED: SemanticException Unable to convert date literal string to date value. +FAILED: SemanticException Unable to convert time literal 'foo' to time value. diff --git a/ql/src/test/results/clientnegative/timestamp_literal.q.out b/ql/src/test/results/clientnegative/timestamp_literal.q.out new file mode 100644 index 0000000..b9c92e6 --- /dev/null +++ b/ql/src/test/results/clientnegative/timestamp_literal.q.out @@ -0,0 +1 @@ +FAILED: SemanticException Unable to convert time literal '2012-12-29 20:01:00 +03:00' to time value. 
diff --git a/ql/src/test/results/clientpositive/acid_join.q.out b/ql/src/test/results/clientpositive/acid_join.q.out new file mode 100644 index 0000000..a1edb89 --- /dev/null +++ b/ql/src/test/results/clientpositive/acid_join.q.out @@ -0,0 +1,72 @@ +PREHOOK: query: -- This test checks that a join with tables with two different buckets send the right bucket info to each table. +create table acidjoin1(name varchar(50), age int) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acidjoin1 +POSTHOOK: query: -- This test checks that a join with tables with two different buckets send the right bucket info to each table. +create table acidjoin1(name varchar(50), age int) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acidjoin1 +PREHOOK: query: create table acidjoin2(name varchar(50), gpa decimal(3, 2)) clustered by (gpa) into 4 buckets stored as orc TBLPROPERTIES ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acidjoin2 +POSTHOOK: query: create table acidjoin2(name varchar(50), gpa decimal(3, 2)) clustered by (gpa) into 4 buckets stored as orc TBLPROPERTIES ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acidjoin2 +PREHOOK: query: create table acidjoin3(name varchar(50), age int, gpa decimal(3, 2)) clustered by (gpa) into 8 buckets stored as orc TBLPROPERTIES ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@acidjoin3 +POSTHOOK: query: create table acidjoin3(name varchar(50), age int, gpa decimal(3, 2)) clustered by (gpa) into 8 buckets stored as orc TBLPROPERTIES ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@acidjoin3 +PREHOOK: query: insert into table acidjoin1 values ('aaa', 35), ('bbb', 32), ('ccc', 32), ('ddd', 35), ('eee', 32) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@acidjoin1 +POSTHOOK: query: insert into table acidjoin1 values ('aaa', 35), ('bbb', 32), ('ccc', 32), ('ddd', 35), ('eee', 32) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@acidjoin1 +POSTHOOK: Lineage: acidjoin1.age EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: acidjoin1.name EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: insert into table acidjoin2 values ('aaa', 3.00), ('bbb', 3.01), ('ccc', 3.02), ('ddd', 3.03), ('eee', 3.04) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__2 +PREHOOK: Output: default@acidjoin2 +POSTHOOK: query: insert into table acidjoin2 values ('aaa', 3.00), ('bbb', 3.01), ('ccc', 3.02), ('ddd', 3.03), ('eee', 3.04) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__2 +POSTHOOK: Output: default@acidjoin2 +POSTHOOK: Lineage: acidjoin2.gpa EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: acidjoin2.name EXPRESSION 
[(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: insert into table acidjoin3 select a.name, age, gpa from acidjoin1 a join acidjoin2 b on (a.name = b.name) +PREHOOK: type: QUERY +PREHOOK: Input: default@acidjoin1 +PREHOOK: Input: default@acidjoin2 +PREHOOK: Output: default@acidjoin3 +POSTHOOK: query: insert into table acidjoin3 select a.name, age, gpa from acidjoin1 a join acidjoin2 b on (a.name = b.name) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acidjoin1 +POSTHOOK: Input: default@acidjoin2 +POSTHOOK: Output: default@acidjoin3 +POSTHOOK: Lineage: acidjoin3.age SIMPLE [(acidjoin1)a.FieldSchema(name:age, type:int, comment:null), ] +POSTHOOK: Lineage: acidjoin3.gpa SIMPLE [(acidjoin2)b.FieldSchema(name:gpa, type:decimal(3,2), comment:null), ] +POSTHOOK: Lineage: acidjoin3.name SIMPLE [(acidjoin1)a.FieldSchema(name:name, type:varchar(50), comment:null), ] +PREHOOK: query: select * from acidjoin3 order by name +PREHOOK: type: QUERY +PREHOOK: Input: default@acidjoin3 +#### A masked pattern was here #### +POSTHOOK: query: select * from acidjoin3 order by name +POSTHOOK: type: QUERY +POSTHOOK: Input: default@acidjoin3 +#### A masked pattern was here #### +aaa 35 3 +bbb 32 3.01 +ccc 32 3.02 +ddd 35 3.03 +eee 32 3.04 diff --git a/ql/src/test/results/clientpositive/alter_merge_orc.q.out b/ql/src/test/results/clientpositive/alter_merge_orc.q.out index 78f40f4..22914a8 100644 --- a/ql/src/test/results/clientpositive/alter_merge_orc.q.out +++ b/ql/src/test/results/clientpositive/alter_merge_orc.q.out @@ -48,9 +48,9 @@ columns:struct columns { i32 key, string value} partitioned:false partitionColumns: totalNumberFiles:3 -totalFileSize:7488 -maxFileSize:2496 -minFileSize:2496 +totalFileSize:7494 +maxFileSize:2498 +minFileSize:2498 #### A masked pattern was here #### PREHOOK: query: select count(1) from src_orc_merge_test @@ -91,9 +91,9 @@ columns:struct columns { i32 key, string value} partitioned:false partitionColumns: totalNumberFiles:1 -totalFileSize:7167 -maxFileSize:7167 -minFileSize:7167 +totalFileSize:7169 +maxFileSize:7169 +minFileSize:7169 #### A masked pattern was here #### PREHOOK: query: select count(1) from src_orc_merge_test @@ -171,9 +171,9 @@ columns:struct columns { i32 key, string value} partitioned:true partitionColumns:struct partition_columns { string ds} totalNumberFiles:3 -totalFileSize:7488 -maxFileSize:2496 -minFileSize:2496 +totalFileSize:7494 +maxFileSize:2498 +minFileSize:2498 #### A masked pattern was here #### PREHOOK: query: select count(1) from src_orc_merge_test_part @@ -218,9 +218,9 @@ columns:struct columns { i32 key, string value} partitioned:true partitionColumns:struct partition_columns { string ds} totalNumberFiles:1 -totalFileSize:7167 -maxFileSize:7167 -minFileSize:7167 +totalFileSize:7169 +maxFileSize:7169 +minFileSize:7169 #### A masked pattern was here #### PREHOOK: query: select count(1) from src_orc_merge_test_part diff --git a/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out index f8486ad..cdcc18a 100644 --- a/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out +++ b/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out @@ -48,9 +48,9 @@ columns:struct columns { i32 key, string value} partitioned:false partitionColumns: totalNumberFiles:3 -totalFileSize:7488 -maxFileSize:2496 -minFileSize:2496 +totalFileSize:7494 +maxFileSize:2498 +minFileSize:2498 #### A masked pattern was here 
#### PREHOOK: query: desc extended src_orc_merge_test_stat @@ -94,7 +94,7 @@ Table Parameters: numFiles 3 numRows 1500 rawDataSize 141000 - totalSize 7488 + totalSize 7494 #### A masked pattern was here #### # Storage Information @@ -146,7 +146,7 @@ Table Parameters: numFiles 1 numRows 1500 rawDataSize 141000 - totalSize 7167 + totalSize 7169 #### A masked pattern was here #### # Storage Information @@ -216,9 +216,9 @@ columns:struct columns { i32 key, string value} partitioned:true partitionColumns:struct partition_columns { string ds} totalNumberFiles:3 -totalFileSize:7488 -maxFileSize:2496 -minFileSize:2496 +totalFileSize:7494 +maxFileSize:2498 +minFileSize:2498 #### A masked pattern was here #### PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011') @@ -249,7 +249,7 @@ Partition Parameters: numFiles 3 numRows 1500 rawDataSize 141000 - totalSize 7488 + totalSize 7494 #### A masked pattern was here #### # Storage Information @@ -300,7 +300,7 @@ Partition Parameters: numFiles 3 numRows 1500 rawDataSize 141000 - totalSize 7488 + totalSize 7494 #### A masked pattern was here #### # Storage Information @@ -359,7 +359,7 @@ Partition Parameters: numFiles 1 numRows 1500 rawDataSize 141000 - totalSize 7167 + totalSize 7169 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/alter_partition_change_col.q.out b/ql/src/test/results/clientpositive/alter_partition_change_col.q.out index 148bff4..0d97b7a 100644 --- a/ql/src/test/results/clientpositive/alter_partition_change_col.q.out +++ b/ql/src/test/results/clientpositive/alter_partition_change_col.q.out @@ -280,7 +280,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123 #### A masked pattern was here #### -Beck 0.0 abc 123 +Beck 0 abc 123 Beck 77.341 abc 123 Beck 79.9 abc 123 Cluck 5.96 abc 123 @@ -288,7 +288,7 @@ Mary 33.33 abc 123 Mary 4.329 abc 123 Snow 55.71 abc 123 Tom -12.25 abc 123 -Tom 19.00 abc 123 +Tom 19 abc 123 Tom 234.79 abc 123 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__' PREHOOK: type: QUERY @@ -347,7 +347,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123 #### A masked pattern was here #### -Beck 0.0 abc 123 +Beck 0 abc 123 Beck 77.341 abc 123 Beck 79.9 abc 123 Cluck 5.96 abc 123 @@ -355,7 +355,7 @@ Mary 33.33 abc 123 Mary 4.329 abc 123 Snow 55.71 abc 123 Tom -12.25 abc 123 -Tom 19.00 abc 123 +Tom 19 abc 123 Tom 234.79 abc 123 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__' PREHOOK: type: QUERY @@ -367,7 +367,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123 #### A masked pattern was here #### -Beck 0.0 __HIVE_DEFAULT_PARTITION__ 123 +Beck 0 __HIVE_DEFAULT_PARTITION__ 123 Beck 77.341 __HIVE_DEFAULT_PARTITION__ 123 Beck 79.9 __HIVE_DEFAULT_PARTITION__ 123 Cluck 5.96 __HIVE_DEFAULT_PARTITION__ 123 @@ -375,7 +375,7 @@ Mary 33.33 __HIVE_DEFAULT_PARTITION__ 123 Mary 4.329 __HIVE_DEFAULT_PARTITION__ 123 Snow 55.71 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.25 __HIVE_DEFAULT_PARTITION__ 123 -Tom 19.00 __HIVE_DEFAULT_PARTITION__ 123 +Tom 19 __HIVE_DEFAULT_PARTITION__ 123 Tom 234.79 __HIVE_DEFAULT_PARTITION__ 123 PREHOOK: query: -- Try out replace columns alter 
table alter_partition_change_col1 partition (p1='abc', p2='123') replace columns (c1 string) @@ -449,7 +449,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123 #### A masked pattern was here #### -Beck 0.0 __HIVE_DEFAULT_PARTITION__ 123 +Beck 0 __HIVE_DEFAULT_PARTITION__ 123 Beck 77.341 __HIVE_DEFAULT_PARTITION__ 123 Beck 79.9 __HIVE_DEFAULT_PARTITION__ 123 Cluck 5.96 __HIVE_DEFAULT_PARTITION__ 123 @@ -457,7 +457,7 @@ Mary 33.33 __HIVE_DEFAULT_PARTITION__ 123 Mary 4.329 __HIVE_DEFAULT_PARTITION__ 123 Snow 55.71 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.25 __HIVE_DEFAULT_PARTITION__ 123 -Tom 19.00 __HIVE_DEFAULT_PARTITION__ 123 +Tom 19 __HIVE_DEFAULT_PARTITION__ 123 Tom 234.79 __HIVE_DEFAULT_PARTITION__ 123 PREHOOK: query: alter table alter_partition_change_col1 replace columns (c1 string) PREHOOK: type: ALTERTABLE_REPLACECOLS @@ -593,7 +593,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123 #### A masked pattern was here #### -Beck 0.0 __HIVE_DEFAULT_PARTITION__ 123 +Beck 0 __HIVE_DEFAULT_PARTITION__ 123 Beck 77.341 __HIVE_DEFAULT_PARTITION__ 123 Beck 79.9 __HIVE_DEFAULT_PARTITION__ 123 Cluck 5.96 __HIVE_DEFAULT_PARTITION__ 123 @@ -601,7 +601,7 @@ Mary 33.33 __HIVE_DEFAULT_PARTITION__ 123 Mary 4.329 __HIVE_DEFAULT_PARTITION__ 123 Snow 55.71 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.25 __HIVE_DEFAULT_PARTITION__ 123 -Tom 19.00 __HIVE_DEFAULT_PARTITION__ 123 +Tom 19 __HIVE_DEFAULT_PARTITION__ 123 Tom 234.79 __HIVE_DEFAULT_PARTITION__ 123 PREHOOK: query: alter table alter_partition_change_col1 partition (p1='abc', p2='123') add columns (c2 decimal(14,4)) PREHOOK: type: ALTERTABLE_ADDCOLS @@ -638,7 +638,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123 #### A masked pattern was here #### -Beck 0.0 abc 123 +Beck 0 abc 123 Beck 77.341 abc 123 Beck 79.9 abc 123 Cluck 5.96 abc 123 @@ -646,7 +646,7 @@ Mary 33.33 abc 123 Mary 4.329 abc 123 Snow 55.71 abc 123 Tom -12.25 abc 123 -Tom 19.00 abc 123 +Tom 19 abc 123 Tom 234.79 abc 123 PREHOOK: query: select * from alter_partition_change_col1 where p1='__HIVE_DEFAULT_PARTITION__' PREHOOK: type: QUERY @@ -658,7 +658,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123 #### A masked pattern was here #### -Beck 0.0 __HIVE_DEFAULT_PARTITION__ 123 +Beck 0 __HIVE_DEFAULT_PARTITION__ 123 Beck 77.341 __HIVE_DEFAULT_PARTITION__ 123 Beck 79.9 __HIVE_DEFAULT_PARTITION__ 123 Cluck 5.96 __HIVE_DEFAULT_PARTITION__ 123 @@ -666,7 +666,7 @@ Mary 33.33 __HIVE_DEFAULT_PARTITION__ 123 Mary 4.329 __HIVE_DEFAULT_PARTITION__ 123 Snow 55.71 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.25 __HIVE_DEFAULT_PARTITION__ 123 -Tom 19.00 __HIVE_DEFAULT_PARTITION__ 123 +Tom 19 __HIVE_DEFAULT_PARTITION__ 123 Tom 234.79 __HIVE_DEFAULT_PARTITION__ 123 PREHOOK: query: -- Try changing column for all partitions at once alter table alter_partition_change_col1 partition (p1, p2='123') change column c2 c2 decimal(10,0) diff --git a/ql/src/test/results/clientpositive/annotate_stats_part.q.out b/ql/src/test/results/clientpositive/annotate_stats_part.q.out index b767a32..fb3c17b 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_part.q.out +++ 
b/ql/src/test/results/clientpositive/annotate_stats_part.q.out @@ -98,11 +98,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 5 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 5 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- partition level analyze statistics for specific parition @@ -135,11 +135,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 2 Data size: 323 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 325 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), '__HIVE_DEFAULT_PARTITION__' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 2 Data size: 323 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 325 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: -- basicStatState: PARTIAL colStatState: NONE @@ -158,11 +158,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 9 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 9 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 9 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 9 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE @@ -181,11 +181,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), '2001' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: -- partition level analyze statistics for all partitions @@ -222,11 +222,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), '__HIVE_DEFAULT_PARTITION__' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE @@ -245,11 +245,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: 
PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE @@ -268,11 +268,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- both partitions will be pruned @@ -331,11 +331,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: zip (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL @@ -354,7 +354,7 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string) outputColumnNames: _col0 @@ -377,7 +377,7 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: year (type: string) outputColumnNames: _col0 @@ -402,7 +402,7 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int) outputColumnNames: _col0, _col1 @@ -425,7 +425,7 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: state (type: string), locid (type: int) outputColumnNames: _col0, _col1 @@ -448,11 +448,11 @@ STAGE PLANS: Processor Tree: TableScan alias: loc_orc - Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: state (type: string), locid (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 323 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 325 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL @@ -471,11 +471,11 @@ STAGE PLANS: Processor Tree: 
TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 8 Data size: 723 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 8 Data size: 727 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- This is to test filter expression evaluation on partition column @@ -496,7 +496,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: loc_orc - Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (locid > 0) (type: boolean) Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -532,7 +532,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: loc_orc - Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (locid > 0) (type: boolean) Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -568,7 +568,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: loc_orc - Statistics: Num rows: 7 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 402 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (locid > 0) (type: boolean) Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE diff --git a/ql/src/test/results/clientpositive/annotate_stats_table.q.out b/ql/src/test/results/clientpositive/annotate_stats_table.q.out index eed5daa..a74d85c 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_table.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_table.q.out @@ -89,11 +89,11 @@ STAGE PLANS: Processor Tree: TableScan alias: emp_orc - Statistics: Num rows: 3 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 366 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: lastname (type: string), deptid (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 3 Data size: 366 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: -- table level analyze statistics @@ -122,11 +122,11 @@ STAGE PLANS: Processor Tree: TableScan alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: lastname (type: string), deptid (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: NONE ListSink PREHOOK: query: -- column level partial statistics @@ -155,11 +155,11 @@ STAGE PLANS: Processor Tree: TableScan alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: lastname (type: string), deptid (type: int) 
outputColumnNames: _col0, _col1 - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: PARTIAL ListSink PREHOOK: query: -- all selected columns have statistics @@ -180,7 +180,7 @@ STAGE PLANS: Processor Tree: TableScan alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: deptid (type: int) outputColumnNames: _col0 @@ -213,11 +213,11 @@ STAGE PLANS: Processor Tree: TableScan alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: lastname (type: string), deptid (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE @@ -236,7 +236,7 @@ STAGE PLANS: Processor Tree: TableScan alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: lastname (type: string) outputColumnNames: _col0 @@ -259,7 +259,7 @@ STAGE PLANS: Processor Tree: TableScan alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: deptid (type: int) outputColumnNames: _col0 @@ -282,7 +282,7 @@ STAGE PLANS: Processor Tree: TableScan alias: emp_orc - Statistics: Num rows: 48 Data size: 364 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 48 Data size: 366 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: lastname (type: string), deptid (type: int) outputColumnNames: _col0, _col1 diff --git a/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out b/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out index 5bf93ad..53619c3 100644 --- a/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out +++ b/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out @@ -15,7 +15,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN @@ -25,7 +24,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES admin - PREHOOK: query: select * from t1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 diff --git a/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out b/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out index 2b7b3ad..1b3cfdc 100644 --- a/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out +++ b/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out @@ -41,7 +41,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: SHOW CURRENT ROLES POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: -- user2 should be able to do a describe table, as pubic is in the current roles DESC t_gpr1 PREHOOK: type: DESCTABLE diff --git 
a/ql/src/test/results/clientpositive/authorization_index.q.out b/ql/src/test/results/clientpositive/authorization_index.q.out index 273931a..540d11b 100644 --- a/ql/src/test/results/clientpositive/authorization_index.q.out +++ b/ql/src/test/results/clientpositive/authorization_index.q.out @@ -43,8 +43,6 @@ Compressed: No Num Buckets: -1 Bucket Columns: [] Sort Columns: [Order(col:a, order:1)] -Storage Desc Params: - serialization.format 1 PREHOOK: query: alter index t1_index on t1 rebuild PREHOOK: type: ALTERINDEX_REBUILD PREHOOK: Input: default@t1 diff --git a/ql/src/test/results/clientpositive/authorization_role_grant1.q.out b/ql/src/test/results/clientpositive/authorization_role_grant1.q.out index 702eb68..9cd3f99 100644 --- a/ql/src/test/results/clientpositive/authorization_role_grant1.q.out +++ b/ql/src/test/results/clientpositive/authorization_role_grant1.q.out @@ -29,7 +29,6 @@ POSTHOOK: type: SHOW_ROLES admin public src_role2 - PREHOOK: query: -- revoke role without role keyword revoke src_rolE2 from user user2 PREHOOK: type: REVOKE_ROLE @@ -48,7 +47,6 @@ POSTHOOK: type: SHOW_ROLES admin public src_role2 - PREHOOK: query: ---------------------------------------- -- role granting without role keyword, with admin option (syntax check) ---------------------------------------- @@ -104,7 +102,6 @@ admin public src_role2 src_role_wadmin - PREHOOK: query: drop role Src_role2 PREHOOK: type: DROPROLE POSTHOOK: query: drop role Src_role2 @@ -116,7 +113,6 @@ POSTHOOK: type: SHOW_ROLES admin public src_role_wadmin - PREHOOK: query: drop role sRc_role_wadmin PREHOOK: type: DROPROLE POSTHOOK: query: drop role sRc_role_wadmin @@ -127,4 +123,3 @@ POSTHOOK: query: show roles POSTHOOK: type: SHOW_ROLES admin public - diff --git a/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out b/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out index 8449813..4ac4320 100644 --- a/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out +++ b/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out @@ -7,7 +7,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES admin - PREHOOK: query: create role r1 PREHOOK: type: CREATEROLE POSTHOOK: query: create role r1 @@ -25,7 +24,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES r1 - PREHOOK: query: set role PUBLIC PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role PUBLIC @@ -35,7 +33,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: set role ALL PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ALL @@ -46,7 +43,6 @@ POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public r1 - PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN diff --git a/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out b/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out index 39367be..54c4ce7 100644 --- a/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_sqlstd.q.out @@ -180,7 +180,6 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: show current roles POSTHOOK: type: SHOW_ROLES public - PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN @@ -206,7 +205,6 @@ POSTHOOK: type: SHOW_ROLES admin public role_v - PREHOOK: query: grant all on table vt2 to role role_v PREHOOK: 
type: GRANT_PRIVILEGE PREHOOK: Output: default@vt2 diff --git a/ql/src/test/results/clientpositive/avro_decimal.q.out b/ql/src/test/results/clientpositive/avro_decimal.q.out index 921a418..88268ce 100644 --- a/ql/src/test/results/clientpositive/avro_decimal.q.out +++ b/ql/src/test/results/clientpositive/avro_decimal.q.out @@ -106,9 +106,9 @@ Mary 4.33 Cluck 5.96 Tom -12.25 Mary 33.33 -Tom 19.00 -Beck 0.00 -Beck 79.90 +Tom 19 +Beck 0 +Beck 79.9 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1 PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1 @@ -175,10 +175,10 @@ POSTHOOK: Input: default@avro_dec1 77.3 55.7 4.3 -6.0 +6 12.3 33.3 -19.0 +19 3.2 79.9 PREHOOK: query: DROP TABLE dec diff --git a/ql/src/test/results/clientpositive/avro_decimal_native.q.out b/ql/src/test/results/clientpositive/avro_decimal_native.q.out index 60b4ccc..c8ae0fb 100644 --- a/ql/src/test/results/clientpositive/avro_decimal_native.q.out +++ b/ql/src/test/results/clientpositive/avro_decimal_native.q.out @@ -92,9 +92,9 @@ Mary 4.33 Cluck 5.96 Tom -12.25 Mary 33.33 -Tom 19.00 -Beck 0.00 -Beck 79.90 +Tom 19 +Beck 0 +Beck 79.9 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1 PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1 @@ -143,10 +143,10 @@ POSTHOOK: Input: default@avro_dec1 77.3 55.7 4.3 -6.0 +6 12.3 33.3 -19.0 +19 3.2 79.9 PREHOOK: query: DROP TABLE dec diff --git a/ql/src/test/results/clientpositive/char_pad_convert.q.out b/ql/src/test/results/clientpositive/char_pad_convert.q.out index 63568af..26102e4 100644 --- a/ql/src/test/results/clientpositive/char_pad_convert.q.out +++ b/ql/src/test/results/clientpositive/char_pad_convert.q.out @@ -144,7 +144,7 @@ select lpad(f, 4, ' '), POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k #### A masked pattern was here #### -74.7 42 zzzzzTRUE 20 dd45.40 yard du +74.7 42 zzzzzTRUE 20 ddd45.4 yard du 26.4 37 zzzzzTRUE 20 dd29.62 history 96.9 18 zzzzFALSE 20 dd27.32 history 13.0 34 zzzzFALSE 20 dd23.91 topolog @@ -190,7 +190,7 @@ POSTHOOK: query: select rpad(f, 4, ' '), POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k #### A masked pattern was here #### -74.7 42 TRUEzzzzz 20 45.40dd yard du +74.7 42 TRUEzzzzz 20 45.4ddd yard du 26.4 37 TRUEzzzzz 20 29.62dd history 96.9 18 FALSEzzzz 20 27.32dd history 13.0 34 FALSEzzzz 20 23.91dd topolog diff --git a/ql/src/test/results/clientpositive/create_like.q.out b/ql/src/test/results/clientpositive/create_like.q.out index 7d05b4f..b9e2783 100644 --- a/ql/src/test/results/clientpositive/create_like.q.out +++ b/ql/src/test/results/clientpositive/create_like.q.out @@ -405,3 +405,51 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 +PREHOOK: query: CREATE TABLE PropertiedParquetTable(a INT, b STRING) STORED AS PARQUET TBLPROPERTIES("parquet.compression"="LZO") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@PropertiedParquetTable +POSTHOOK: query: CREATE TABLE PropertiedParquetTable(a INT, b STRING) STORED AS PARQUET TBLPROPERTIES("parquet.compression"="LZO") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@PropertiedParquetTable +PREHOOK: query: CREATE TABLE LikePropertiedParquetTable LIKE PropertiedParquetTable +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@LikePropertiedParquetTable +POSTHOOK: query: CREATE TABLE LikePropertiedParquetTable LIKE PropertiedParquetTable +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default 
+POSTHOOK: Output: default@LikePropertiedParquetTable +PREHOOK: query: DESCRIBE FORMATTED LikePropertiedParquetTable +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@likepropertiedparquettable +POSTHOOK: query: DESCRIBE FORMATTED LikePropertiedParquetTable +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@likepropertiedparquettable +# col_name data_type comment + +a int +b string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + parquet.compression LZO +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe +InputFormat: org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out index c26a66d..252aa0f 100644 --- a/ql/src/test/results/clientpositive/ctas_colname.q.out +++ b/ql/src/test/results/clientpositive/ctas_colname.q.out @@ -461,17 +461,22 @@ POSTHOOK: Input: default@x5 119 val_119 119 PREHOOK: query: -- sub queries explain -create table x6 as select * from (select *, max(key) from src1) a +create table x6 as select * from (select *, key + 1 from src1) a PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: -- sub queries explain -create table x6 as select * from (select *, max(key) from src1) a +create table x6 as select * from (select *, key + 1 from src1) a POSTHOOK: type: CREATETABLE_AS_SELECT STAGE DEPENDENCIES: Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-3 depends on stages: Stage-0 - Stage-2 depends on stages: Stage-3 + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-8 depends on stages: Stage-0 + Stage-2 depends on stages: Stage-8 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 STAGE PLANS: Stage: Stage-1 @@ -481,36 +486,26 @@ STAGE PLANS: alias: src1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: key + expressions: key (type: string), value (type: string), (key + 1) (type: double) + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: max(key) - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) - Reduce Operator Tree: - Group By Operator - aggregations: max(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: string), _col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 84 Basic stats: COMPLETE Column stats: NONE - table: - input format: 
org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.x6 + File Output Operator + compressed: false + Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.x6 + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### Stage: Stage-0 Move Operator @@ -518,10 +513,10 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### - Stage: Stage-3 + Stage: Stage-8 Create Table Operator: Create Table - columns: _col0 string, _c1 string + columns: key string, value string, _c1 double input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -530,12 +525,42 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: create table x6 as select * from (select *, max(key) from src1) a + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.x6 + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.x6 + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: create table x6 as select * from (select *, key + 1 from src1) a PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src1 PREHOOK: Output: database:default PREHOOK: Output: default@x6 -POSTHOOK: query: create table x6 as select * from (select *, max(key) from src1) a +POSTHOOK: query: create table x6 as select * from (select *, key + 1 from src1) a POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src1 POSTHOOK: Output: database:default @@ -548,8 +573,9 @@ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@x6 # col_name data_type comment -_col0 string -_c1 string +key string +value string +_c1 double # Detailed Table Information Database: default @@ -561,9 +587,9 @@ Table Type: MANAGED_TABLE Table Parameters: COLUMN_STATS_ACCURATE true numFiles 1 - numRows 1 - rawDataSize 5 - totalSize 6 + numRows 25 + rawDataSize 309 + totalSize 334 #### A masked pattern was here #### # Storage Information @@ -584,12 +610,36 @@ POSTHOOK: query: select * from x6 POSTHOOK: type: QUERY POSTHOOK: Input: default@x6 #### A masked pattern was here #### -98 98 + NULL + NULL + NULL + NULL + val_165 NULL + val_193 NULL + val_265 NULL + val_27 NULL + val_409 NULL + val_484 NULL +128 129.0 +146 val_146 147.0 +150 val_150 151.0 +213 val_213 214.0 +224 225.0 +238 val_238 239.0 +255 val_255 256.0 +273 val_273 274.0 +278 val_278 279.0 +311 val_311 312.0 +369 370.0 +401 val_401 402.0 +406 val_406 407.0 +66 val_66 67.0 +98 val_98 99.0 PREHOOK: query: explain 
-create table x7 as select * from (select * from src group by key) a +create table x7 as select * from (select *, count(value) from src group by key, value) a PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain -create table x7 as select * from (select * from src group by key) a +create table x7 as select * from (select *, count(value) from src group by key, value) a POSTHOOK: type: CREATETABLE_AS_SELECT STAGE DEPENDENCIES: Stage-1 is a root stage @@ -605,28 +655,31 @@ STAGE PLANS: alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: key + expressions: key (type: string), value (type: string) + outputColumnNames: key, value Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator - keys: key (type: string) + aggregations: count(value) + keys: key (type: string), value (type: string) mode: hash - outputColumnNames: _col0 + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator - keys: KEY._col0 (type: string) + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial - outputColumnNames: _col0 + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string) - outputColumnNames: _col0 + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -646,7 +699,7 @@ STAGE PLANS: Stage: Stage-3 Create Table Operator: Create Table - columns: _col0 string + columns: _col0 string, _col1 string, _c1 bigint input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -655,12 +708,12 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: create table x7 as select * from (select * from src group by key) a +PREHOOK: query: create table x7 as select * from (select *, count(value) from src group by key, value) a PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@x7 -POSTHOOK: query: create table x7 as select * from (select * from src group by key) a +POSTHOOK: query: create table x7 as select * from (select *, count(value) from src group by key, value) a POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default @@ -674,6 +727,8 @@ POSTHOOK: Input: default@x7 # col_name data_type comment _col0 string +_col1 string +_c1 bigint # Detailed Table Information Database: default @@ -686,8 +741,8 @@ Table Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 309 - rawDataSize 864 - totalSize 1173 + rawDataSize 3891 + totalSize 4200 #### A 
masked pattern was here #### # Storage Information @@ -708,320 +763,320 @@ POSTHOOK: query: select * from x7 POSTHOOK: type: QUERY POSTHOOK: Input: default@x7 #### A masked pattern was here #### -0 -10 -100 -103 -104 -105 -11 -111 -113 -114 -116 -118 -119 -12 -120 -125 -126 -128 -129 -131 -133 -134 -136 -137 -138 -143 -145 -146 -149 -15 -150 -152 -153 -155 -156 -157 -158 -160 -162 -163 -164 -165 -166 -167 -168 -169 -17 -170 -172 -174 -175 -176 -177 -178 -179 -18 -180 -181 -183 -186 -187 -189 -19 -190 -191 -192 -193 -194 -195 -196 -197 -199 -2 -20 -200 -201 -202 -203 -205 -207 -208 -209 -213 -214 -216 -217 -218 -219 -221 -222 -223 -224 -226 -228 -229 -230 -233 -235 -237 -238 -239 -24 -241 -242 -244 -247 -248 -249 -252 -255 -256 -257 -258 -26 -260 -262 -263 -265 -266 -27 -272 -273 -274 -275 -277 -278 -28 -280 -281 -282 -283 -284 -285 -286 -287 -288 -289 -291 -292 -296 -298 -30 -302 -305 -306 -307 -308 -309 -310 -311 -315 -316 -317 -318 -321 -322 -323 -325 -327 -33 -331 -332 -333 -335 -336 -338 -339 -34 -341 -342 -344 -345 -348 -35 -351 -353 -356 -360 -362 -364 -365 -366 -367 -368 -369 -37 -373 -374 -375 -377 -378 -379 -382 -384 -386 -389 -392 -393 -394 -395 -396 -397 -399 -4 -400 -401 -402 -403 -404 -406 -407 -409 -41 -411 -413 -414 -417 -418 -419 -42 -421 -424 -427 -429 -43 -430 -431 -432 -435 -436 -437 -438 -439 -44 -443 -444 -446 -448 -449 -452 -453 -454 -455 -457 -458 -459 -460 -462 -463 -466 -467 -468 -469 -47 -470 -472 -475 -477 -478 -479 -480 -481 -482 -483 -484 -485 -487 -489 -490 -491 -492 -493 -494 -495 -496 -497 -498 -5 -51 -53 -54 -57 -58 -64 -65 -66 -67 -69 -70 -72 -74 -76 -77 -78 -8 -80 -82 -83 -84 -85 -86 -87 -9 -90 -92 -95 -96 -97 -98 +0 val_0 3 +10 val_10 1 +100 val_100 2 +103 val_103 2 +104 val_104 2 +105 val_105 1 +11 val_11 1 +111 val_111 1 +113 val_113 2 +114 val_114 1 +116 val_116 1 +118 val_118 2 +119 val_119 3 +12 val_12 2 +120 val_120 2 +125 val_125 2 +126 val_126 1 +128 val_128 3 +129 val_129 2 +131 val_131 1 +133 val_133 1 +134 val_134 2 +136 val_136 1 +137 val_137 2 +138 val_138 4 +143 val_143 1 +145 val_145 1 +146 val_146 2 +149 val_149 2 +15 val_15 2 +150 val_150 1 +152 val_152 2 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 2 +165 val_165 2 +166 val_166 1 +167 val_167 3 +168 val_168 1 +169 val_169 4 +17 val_17 1 +170 val_170 1 +172 val_172 2 +174 val_174 2 +175 val_175 2 +176 val_176 2 +177 val_177 1 +178 val_178 1 +179 val_179 2 +18 val_18 2 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 3 +189 val_189 1 +19 val_19 1 +190 val_190 1 +191 val_191 2 +192 val_192 1 +193 val_193 3 +194 val_194 1 +195 val_195 2 +196 val_196 1 +197 val_197 2 +199 val_199 3 +2 val_2 1 +20 val_20 1 +200 val_200 2 +201 val_201 1 +202 val_202 1 +203 val_203 2 +205 val_205 2 +207 val_207 2 +208 val_208 3 +209 val_209 2 +213 val_213 2 +214 val_214 1 +216 val_216 2 +217 val_217 2 +218 val_218 1 +219 val_219 2 +221 val_221 2 +222 val_222 1 +223 val_223 2 +224 val_224 2 +226 val_226 1 +228 val_228 1 +229 val_229 2 +230 val_230 5 +233 val_233 2 +235 val_235 1 +237 val_237 2 +238 val_238 2 +239 val_239 2 +24 val_24 2 +241 val_241 1 +242 val_242 2 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 2 +256 val_256 2 +257 val_257 1 +258 val_258 1 +26 val_26 2 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 2 +266 val_266 1 +27 val_27 1 +272 val_272 2 +273 val_273 3 +274 val_274 1 +275 val_275 1 +277 val_277 4 +278 val_278 2 +28 val_28 1 +280 
val_280 2 +281 val_281 2 +282 val_282 2 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 2 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 3 +30 val_30 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 2 +308 val_308 1 +309 val_309 2 +310 val_310 1 +311 val_311 3 +315 val_315 1 +316 val_316 3 +317 val_317 2 +318 val_318 3 +321 val_321 2 +322 val_322 2 +323 val_323 1 +325 val_325 2 +327 val_327 3 +33 val_33 1 +331 val_331 2 +332 val_332 1 +333 val_333 2 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +34 val_34 1 +341 val_341 1 +342 val_342 2 +344 val_344 2 +345 val_345 1 +348 val_348 5 +35 val_35 3 +351 val_351 1 +353 val_353 2 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 val_367 2 +368 val_368 1 +369 val_369 3 +37 val_37 2 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 2 +384 val_384 3 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 2 +396 val_396 3 +397 val_397 2 +399 val_399 2 +4 val_4 1 +400 val_400 1 +401 val_401 5 +402 val_402 1 +403 val_403 3 +404 val_404 2 +406 val_406 4 +407 val_407 1 +409 val_409 3 +41 val_41 1 +411 val_411 1 +413 val_413 2 +414 val_414 2 +417 val_417 3 +418 val_418 1 +419 val_419 1 +42 val_42 2 +421 val_421 1 +424 val_424 2 +427 val_427 1 +429 val_429 2 +43 val_43 1 +430 val_430 3 +431 val_431 3 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 3 +439 val_439 2 +44 val_44 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 3 +455 val_455 1 +457 val_457 1 +458 val_458 2 +459 val_459 2 +460 val_460 1 +462 val_462 2 +463 val_463 2 +466 val_466 3 +467 val_467 1 +468 val_468 4 +469 val_469 5 +47 val_47 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 2 +479 val_479 1 +480 val_480 3 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 4 +490 val_490 1 +491 val_491 1 +492 val_492 2 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 3 +5 val_5 3 +51 val_51 2 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 2 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 2 +69 val_69 1 +70 val_70 3 +72 val_72 2 +74 val_74 1 +76 val_76 2 +77 val_77 1 +78 val_78 1 +8 val_8 1 +80 val_80 1 +82 val_82 1 +83 val_83 2 +84 val_84 2 +85 val_85 1 +86 val_86 1 +87 val_87 1 +9 val_9 1 +90 val_90 3 +92 val_92 1 +95 val_95 2 +96 val_96 1 +97 val_97 2 +98 val_98 2 PREHOOK: query: explain -create table x8 as select * from (select * from src group by key having key < 9) a +create table x8 as select * from (select *, count(value) from src group by key, value having key < 9) a PREHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: query: explain -create table x8 as select * from (select * from src group by key having key < 9) a +create table x8 as select * from (select *, count(value) from src group by key, value having key < 9) a POSTHOOK: type: CREATETABLE_AS_SELECT STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1040,28 +1095,31 @@ STAGE PLANS: predicate: (key < 9) (type: boolean) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: key (type: string) - outputColumnNames: key + expressions: key (type: string), value (type: string) + outputColumnNames: key, value Statistics: Num rows: 166 Data size: 1763 Basic stats: 
COMPLETE Column stats: NONE Group By Operator - keys: key (type: string) + aggregations: count(value) + keys: key (type: string), value (type: string) mode: hash - outputColumnNames: _col0 + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator - keys: KEY._col0 (type: string) + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: mergepartial - outputColumnNames: _col0 + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string) - outputColumnNames: _col0 + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -1081,7 +1139,7 @@ STAGE PLANS: Stage: Stage-3 Create Table Operator: Create Table - columns: _col0 string + columns: _col0 string, _col1 string, _c1 bigint input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat serde name: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe @@ -1090,12 +1148,12 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: create table x8 as select * from (select * from src group by key having key < 9) a +PREHOOK: query: create table x8 as select * from (select *, count(value) from src group by key, value having key < 9) a PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@x8 -POSTHOOK: query: create table x8 as select * from (select * from src group by key having key < 9) a +POSTHOOK: query: create table x8 as select * from (select *, count(value) from src group by key, value having key < 9) a POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default @@ -1109,6 +1167,8 @@ POSTHOOK: Input: default@x8 # col_name data_type comment _col0 string +_col1 string +_c1 bigint # Detailed Table Information Database: default @@ -1121,8 +1181,8 @@ Table Parameters: COLUMN_STATS_ACCURATE true numFiles 1 numRows 5 - rawDataSize 5 - totalSize 10 + rawDataSize 45 + totalSize 50 #### A masked pattern was here #### # Storage Information @@ -1143,11 +1203,11 @@ POSTHOOK: query: select * from x8 POSTHOOK: type: QUERY POSTHOOK: Input: default@x8 #### A masked pattern was here #### -0 -2 -4 -5 -8 +0 val_0 3 +2 val_2 1 +4 val_4 1 +5 val_5 3 +8 val_8 1 PREHOOK: query: explain create table x9 as select * from (select max(value),key from src group by key having key < 9 AND max(value) IS NOT NULL) a PREHOOK: type: CREATETABLE_AS_SELECT diff --git a/ql/src/test/results/clientpositive/database_drop.q.out b/ql/src/test/results/clientpositive/database_drop.q.out index f483c06..225104f 100644 --- a/ql/src/test/results/clientpositive/database_drop.q.out +++ b/ql/src/test/results/clientpositive/database_drop.q.out @@ -352,6 +352,42 @@ POSTHOOK: 
type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:db5 POSTHOOK: Output: db5@extab1 +PREHOOK: query: -- add a table, create index (give a name for index table) +CREATE TABLE temp_tbl3 (id INT, name STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:db5 +PREHOOK: Output: db5@temp_tbl3 +POSTHOOK: query: -- add a table, create index (give a name for index table) +CREATE TABLE temp_tbl3 (id INT, name STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:db5 +POSTHOOK: Output: db5@temp_tbl3 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl3 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: db5@temp_tbl3 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' into table temp_tbl3 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: db5@temp_tbl3 +PREHOOK: query: CREATE INDEX temp_tbl3_idx ON TABLE temp_tbl3(id) AS 'COMPACT' with DEFERRED REBUILD IN TABLE temp_tbl3_idx_tbl +PREHOOK: type: CREATEINDEX +PREHOOK: Input: db5@temp_tbl3 +POSTHOOK: query: CREATE INDEX temp_tbl3_idx ON TABLE temp_tbl3(id) AS 'COMPACT' with DEFERRED REBUILD IN TABLE temp_tbl3_idx_tbl +POSTHOOK: type: CREATEINDEX +POSTHOOK: Input: db5@temp_tbl3 +POSTHOOK: Output: db5@temp_tbl3_idx_tbl +PREHOOK: query: ALTER INDEX temp_tbl3_idx ON temp_tbl3 REBUILD +PREHOOK: type: ALTERINDEX_REBUILD +PREHOOK: Input: db5@temp_tbl3 +PREHOOK: Output: db5@temp_tbl3_idx_tbl +POSTHOOK: query: ALTER INDEX temp_tbl3_idx ON temp_tbl3 REBUILD +POSTHOOK: type: ALTERINDEX_REBUILD +POSTHOOK: Input: db5@temp_tbl3 +POSTHOOK: Output: db5@temp_tbl3_idx_tbl +POSTHOOK: Lineage: temp_tbl3_idx_tbl._bucketname SIMPLE [(temp_tbl3)temp_tbl3.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: temp_tbl3_idx_tbl._offsets EXPRESSION [(temp_tbl3)temp_tbl3.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: temp_tbl3_idx_tbl.id SIMPLE [(temp_tbl3)temp_tbl3.FieldSchema(name:id, type:int, comment:null), ] PREHOOK: query: -- drop the database with cascade DROP DATABASE db5 CASCADE PREHOOK: type: DROPDATABASE @@ -369,6 +405,8 @@ PREHOOK: Output: db5@part_tab3 PREHOOK: Output: db5@temp_tbl PREHOOK: Output: db5@temp_tbl2 PREHOOK: Output: db5@temp_tbl2_view +PREHOOK: Output: db5@temp_tbl3 +PREHOOK: Output: db5@temp_tbl3_idx_tbl PREHOOK: Output: db5@temp_tbl_view POSTHOOK: query: -- drop the database with cascade DROP DATABASE db5 CASCADE @@ -387,5 +425,7 @@ POSTHOOK: Output: db5@part_tab3 POSTHOOK: Output: db5@temp_tbl POSTHOOK: Output: db5@temp_tbl2 POSTHOOK: Output: db5@temp_tbl2_view +POSTHOOK: Output: db5@temp_tbl3 +POSTHOOK: Output: db5@temp_tbl3_idx_tbl POSTHOOK: Output: db5@temp_tbl_view #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/decimal_2.q.out b/ql/src/test/results/clientpositive/decimal_2.q.out index 759ecf4..934590c 100644 --- a/ql/src/test/results/clientpositive/decimal_2.q.out +++ b/ql/src/test/results/clientpositive/decimal_2.q.out @@ -264,7 +264,7 @@ POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) from deci POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_2 #### A masked pattern was here #### -1.0 +1 PREHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) from decimal_2 PREHOOK: type: QUERY PREHOOK: Input: default@decimal_2 diff --git a/ql/src/test/results/clientpositive/decimal_3.q.out b/ql/src/test/results/clientpositive/decimal_3.q.out index 
acaae65..8e9a30a 100644 --- a/ql/src/test/results/clientpositive/decimal_3.q.out +++ b/ql/src/test/results/clientpositive/decimal_3.q.out @@ -33,7 +33,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -42,7 +42,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.000000000000000000 0 +0 0 0 0 0 0 0.01 0 @@ -53,8 +53,8 @@ NULL 0 0.33 0 0.333 0 1 1 -1.0 1 -1.000000000000000000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 @@ -62,14 +62,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -78,14 +78,14 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 200 200 125.2 125 -124.00 124 +124 124 100 100 20 20 10 10 -3.140 4 +3.14 4 3.14 3 3.14 3 3.14 3 @@ -93,8 +93,8 @@ POSTHOOK: Input: default@decimal_3 2 2 1.122 1 1.12 1 -1.000000000000000000 1 -1.0 1 +1 1 +1 1 1 1 0.333 0 0.33 0 @@ -105,7 +105,7 @@ POSTHOOK: Input: default@decimal_3 0.01 0 0 0 0 0 -0.000000000000000000 0 +0 0 -0.3 0 -0.33 0 -0.333 0 @@ -114,7 +114,7 @@ POSTHOOK: Input: default@decimal_3 -1.122 -11 -1255.49 -1255 -4400 4400 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 NULL 0 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value PREHOOK: type: QUERY @@ -125,7 +125,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -134,7 +134,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.000000000000000000 0 +0 0 0 0 0 0 0.01 0 @@ -145,8 +145,8 @@ NULL 0 0.33 0 0.333 0 1 1 -1.0 1 -1.000000000000000000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 @@ -154,14 +154,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -171,7 +171,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL --1234567890.1234567890 +-1234567890.123456789 -4400 -1255.49 -1.122 @@ -179,7 +179,7 @@ NULL -0.333 -0.33 -0.3 -0.000000000000000000 +0 0.01 0.02 0.1 @@ -195,10 +195,10 @@ NULL 10 20 100 -124.00 +124 125.2 200 -1234567890.1234567800 +1234567890.12345678 PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -208,7 +208,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -216,7 +216,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.000000000000000000 0 +0 0 0.01 0 0.02 0 0.1 0 @@ -232,10 +232,10 @@ NULL 0 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value PREHOOK: 
type: QUERY PREHOOK: Input: default@decimal_3 @@ -244,23 +244,23 @@ POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY v POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### --1234567890 -1234567890.1234567890 +-1234567890 -1234567890.123456789 -1255 -1255.49 -11 -1.122 -1 -2.24 -0 0.330000000000000000 -1 5.242000000000000000 +0 0.33 +1 5.242 2 4 3 9.42 -4 3.140 +4 3.14 10 10 20 20 100 100 -124 124.00 +124 124 125 125.2 200 200 4400 -4400 -1234567890 1234567890.1234567800 +1234567890 1234567890.12345678 PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -269,7 +269,7 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### --1234567890.1234567890 -1234567890 -1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -1234567890.123456789 -1234567890 -4400 4400 -4400 4400 -1255.49 -1255 -1255.49 -1255 -1.122 -11 -1.122 -11 @@ -280,7 +280,11 @@ POSTHOOK: Input: default@decimal_3 -0.333 0 -0.333 0 -0.33 0 -0.33 0 -0.3 0 -0.3 0 -0.000000000000000000 0 0.000000000000000000 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 @@ -293,8 +297,14 @@ POSTHOOK: Input: default@decimal_3 0.33 0 0.33 0 0.333 0 0.333 0 1 1 1 1 -1.0 1 1.0 1 -1.000000000000000000 1 1.000000000000000000 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 1.12 1 1.12 1 1.122 1 1.122 1 2 2 2 2 @@ -310,14 +320,20 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 3.14 3 3.14 3 3.14 3 -3.140 4 3.140 4 +3.14 3 3.14 4 +3.14 3 3.14 4 +3.14 3 3.14 4 +3.14 4 3.14 3 +3.14 4 3.14 3 +3.14 4 3.14 3 +3.14 4 3.14 4 10 10 10 10 20 20 20 20 100 100 100 100 -124.00 124 124.00 124 +124 124 124 124 125.2 125 125.2 125 200 200 200 200 -1234567890.1234567800 1234567890 1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 1234567890.12345678 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -329,7 +345,7 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -341,7 +357,7 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 PREHOOK: query: DROP TABLE DECIMAL_3 PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_3 diff --git a/ql/src/test/results/clientpositive/decimal_4.q.out b/ql/src/test/results/clientpositive/decimal_4.q.out index a31d27a..50662af 100644 --- a/ql/src/test/results/clientpositive/decimal_4.q.out +++ b/ql/src/test/results/clientpositive/decimal_4.q.out @@ -57,7 +57,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_1 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -66,7 +66,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.0000000000000000000000000 0 +0 0 0 0 0 0 0.01 0 @@ -78,7 +78,7 @@ NULL 0 0.333 0 0.9999999999999999999999999 1 1 1 -1.0 1 +1 1 1.12 1 1.122 1 2 2 @@ -86,14 +86,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT * FROM 
DECIMAL_4_2 ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_4_2 @@ -103,7 +103,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_2 #### A masked pattern was here #### NULL NULL --1234567890.1234567890 -3703703670.3703703670 +-1234567890.123456789 -3703703670.370370367 -4400 -13200 -1255.49 -3766.47 -1.122 -3.366 @@ -112,7 +112,7 @@ NULL NULL -0.333 -0.999 -0.33 -0.99 -0.3 -0.9 -0.0000000000000000000000000 0.0000000000000000000000000 +0 0 0 0 0 0 0.01 0.03 @@ -124,7 +124,7 @@ NULL NULL 0.333 0.999 0.9999999999999999999999999 2.9999999999999999999999997 1 3 -1.0 3.0 +1 3 1.12 3.36 1.122 3.366 2 6 @@ -132,14 +132,14 @@ NULL NULL 3.14 9.42 3.14 9.42 3.14 9.42 -3.140 9.420 +3.14 9.42 10 30 20 60 100 300 -124.00 372.00 +124 372 125.2 375.6 200 600 -1234567890.1234567800 3703703670.3703703400 +1234567890.12345678 3703703670.37037034 PREHOOK: query: DROP TABLE DECIMAL_4_1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_4_1 diff --git a/ql/src/test/results/clientpositive/decimal_5.q.out b/ql/src/test/results/clientpositive/decimal_5.q.out index 6df5097..0f24b8a 100644 --- a/ql/src/test/results/clientpositive/decimal_5.q.out +++ b/ql/src/test/results/clientpositive/decimal_5.q.out @@ -43,7 +43,7 @@ NULL -0.333 -0.33 -0.3 -0.00000 +0 0 0 0.01 @@ -54,8 +54,8 @@ NULL 0.33 0.333 1 -1.0 -1.00000 +1 +1 1.12 1.122 2 @@ -63,11 +63,11 @@ NULL 3.14 3.14 3.14 -3.140 +3.14 10 20 100 -124.00 +124 125.2 200 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key @@ -86,7 +86,7 @@ NULL -0.333 -0.33 -0.3 -0.00000 +0 0.01 0.02 0.1 @@ -102,7 +102,7 @@ NULL 10 20 100 -124.00 +124 125.2 200 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5 @@ -161,7 +161,7 @@ POSTHOOK: Input: default@decimal_5 #### A masked pattern was here #### NULL NULL -0.000 +0 0 100 10 @@ -180,7 +180,7 @@ NULL -0.3 -0.33 -0.333 -1.0 +1 2 3.14 -1.12 @@ -188,13 +188,13 @@ NULL -1.122 1.12 1.122 -124.00 +124 125.2 NULL 3.14 3.14 -3.140 -1.000 +3.14 +1 NULL NULL PREHOOK: query: DROP TABLE DECIMAL_5 diff --git a/ql/src/test/results/clientpositive/decimal_6.q.out b/ql/src/test/results/clientpositive/decimal_6.q.out index 2bbd48a..0344fa9 100644 --- a/ql/src/test/results/clientpositive/decimal_6.q.out +++ b/ql/src/test/results/clientpositive/decimal_6.q.out @@ -91,16 +91,16 @@ NULL -0.333 -0.3 -0.3 -0.00000 -0.0000 +0 +0 0 0 0.333 0.333 -1.0 -1.0 -1.0000 -1.00000 +1 +1 +1 +1 1.12 1.12 1.122 @@ -111,14 +111,14 @@ NULL 3.14 3.14 3.14 -3.140 -3.140 +3.14 +3.14 10 10 10.7343 10.73433 -124.00 -124.00 +124 +124 125.2 125.2 23232.23435 diff --git a/ql/src/test/results/clientpositive/decimal_join2.q.out b/ql/src/test/results/clientpositive/decimal_join2.q.out new file mode 100644 index 0000000..f9c33b7 --- /dev/null +++ b/ql/src/test/results/clientpositive/decimal_join2.q.out @@ -0,0 +1,371 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3_txt +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_3 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_3 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_3_txt +POSTHOOK: query: CREATE TABLE DECIMAL_3_txt(key decimal(38,18), value int) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ' ' +STORED AS TEXTFILE +POSTHOOK: type: 
CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_3_txt +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_3_txt +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DECIMAL_3_txt +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_3_txt +PREHOOK: query: CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@decimal_3_txt +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_3 +POSTHOOK: query: CREATE TABLE DECIMAL_3 STORED AS ORC AS SELECT * FROM DECIMAL_3_txt +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@decimal_3_txt +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_3 +PREHOOK: query: EXPLAIN +SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: decimal(38,18)) + sort order: + + Map-reduce partition columns: key (type: decimal(38,18)) + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: int) + TableScan + alias: a + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: decimal(38,18)) + sort order: + + Map-reduce partition columns: key (type: decimal(38,18)) + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: int) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} {VALUE._col0} + 1 {KEY.reducesinkkey0} {VALUE._col0} + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 20 Data size: 2362 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(38,18)), _col1 (type: int), _col5 (type: decimal(38,18)), _col6 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 20 Data size: 2362 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: decimal(38,18)), _col1 (type: int), _col2 (type: decimal(38,18)), _col3 (type: int) + sort order: ++++ + Statistics: Num 
rows: 20 Data size: 2362 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(38,18)), KEY.reducesinkkey1 (type: int), KEY.reducesinkkey2 (type: decimal(38,18)), KEY.reducesinkkey3 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 20 Data size: 2362 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 20 Data size: 2362 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +-1234567890.123456789 -1234567890 -1234567890.123456789 -1234567890 +-4400 4400 -4400 4400 +-1255.49 -1255 -1255.49 -1255 +-1.122 -11 -1.122 -11 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-0.333 0 -0.333 0 +-0.33 0 -0.33 0 +-0.3 0 -0.3 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0.01 0 0.01 0 +0.02 0 0.02 0 +0.1 0 0.1 0 +0.2 0 0.2 0 +0.3 0 0.3 0 +0.33 0 0.33 0 +0.333 0 0.333 0 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1.12 1 1.12 1 +1.122 1 1.122 1 +2 2 2 2 +2 2 2 2 +2 2 2 2 +2 2 2 2 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 4 +3.14 3 3.14 4 +3.14 3 3.14 4 +3.14 4 3.14 3 +3.14 4 3.14 3 +3.14 4 3.14 3 +3.14 4 3.14 4 +10 10 10 10 +20 20 20 20 +100 100 100 100 +124 124 124 124 +125.2 125 125.2 125 +200 200 200 200 +1234567890.12345678 1234567890 1234567890.12345678 1234567890 +PREHOOK: query: EXPLAIN +SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-5 is a root stage + Stage-2 depends on stages: Stage-5 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-5 + Map Reduce Local Work + Alias -> Map Local Tables: + a + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + a + TableScan + alias: a + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 {value} + 1 {key} {value} + keys: + 0 key (type: decimal(38,18)) + 1 key (type: decimal(38,18)) + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE + Map Join 
Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {key} {value} + 1 {key} {value} + keys: + 0 key (type: decimal(38,18)) + 1 key (type: decimal(38,18)) + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 20 Data size: 2362 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: decimal(38,18)), _col1 (type: int), _col5 (type: decimal(38,18)), _col6 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 20 Data size: 2362 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: decimal(38,18)), _col1 (type: int), _col2 (type: decimal(38,18)), _col3 (type: int) + sort order: ++++ + Statistics: Num rows: 20 Data size: 2362 Basic stats: COMPLETE Column stats: NONE + Local Work: + Map Reduce Local Work + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: decimal(38,18)), KEY.reducesinkkey1 (type: int), KEY.reducesinkkey2 (type: decimal(38,18)), KEY.reducesinkkey3 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 20 Data size: 2362 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 20 Data size: 2362 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.key, b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_3 +#### A masked pattern was here #### +-1234567890.123456789 -1234567890 -1234567890.123456789 -1234567890 +-4400 4400 -4400 4400 +-1255.49 -1255 -1255.49 -1255 +-1.122 -11 -1.122 -11 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-1.12 -1 -1.12 -1 +-0.333 0 -0.333 0 +-0.33 0 -0.33 0 +-0.3 0 -0.3 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0.01 0 0.01 0 +0.02 0 0.02 0 +0.1 0 0.1 0 +0.2 0 0.2 0 +0.3 0 0.3 0 +0.33 0 0.33 0 +0.333 0 0.333 0 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1.12 1 1.12 1 +1.122 1 1.122 1 +2 2 2 2 +2 2 2 2 +2 2 2 2 +2 2 2 2 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 3 +3.14 3 3.14 4 +3.14 3 3.14 4 +3.14 3 3.14 4 +3.14 4 3.14 3 +3.14 4 3.14 3 +3.14 4 3.14 3 +3.14 4 3.14 4 +10 10 10 10 +20 20 20 20 +100 100 100 100 +124 124 124 124 +125.2 125 125.2 125 +200 200 200 200 +1234567890.12345678 1234567890 1234567890.12345678 1234567890 +PREHOOK: query: DROP TABLE DECIMAL_3_txt +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_3_txt +PREHOOK: Output: default@decimal_3_txt +POSTHOOK: query: DROP TABLE DECIMAL_3_txt +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_3_txt +POSTHOOK: Output: default@decimal_3_txt +PREHOOK: query: DROP TABLE DECIMAL_3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_3 +PREHOOK: Output: default@decimal_3 +POSTHOOK: query: DROP TABLE DECIMAL_3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: 
default@decimal_3 +POSTHOOK: Output: default@decimal_3 diff --git a/ql/src/test/results/clientpositive/decimal_precision.q.out b/ql/src/test/results/clientpositive/decimal_precision.q.out index 94c63cb..f3f2cbc 100644 --- a/ql/src/test/results/clientpositive/decimal_precision.q.out +++ b/ql/src/test/results/clientpositive/decimal_precision.q.out @@ -76,13 +76,13 @@ NULL NULL NULL NULL -0.0000000000 -0.0000000000 -0.0000000000 -0.0000000000 0 -0.1234567890 -0.1234567890 +0 +0 +0 +0 +0.123456789 +0.123456789 1.2345678901 1.2345678901 1.2345678901 @@ -106,7 +106,7 @@ NULL 123456789.0123456 123456789.0123456789 1234567890.123456 -1234567890.1234567890 +1234567890.123456789 PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -159,13 +159,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 1.0000000000 -1.0000000000 -0.0000000000 1.0000000000 -1.0000000000 -0.0000000000 1.0000000000 -1.0000000000 -0.0000000000 1.0000000000 -1.0000000000 0 1 -1 -0.1234567890 1.1234567890 -0.8765432110 -0.1234567890 1.1234567890 -0.8765432110 +0 1 -1 +0 1 -1 +0 1 -1 +0 1 -1 +0.123456789 1.123456789 -0.876543211 +0.123456789 1.123456789 -0.876543211 1.2345678901 2.2345678901 0.2345678901 1.2345678901 2.2345678901 0.2345678901 1.2345678901 2.2345678901 0.2345678901 @@ -189,7 +189,7 @@ NULL NULL NULL 123456789.0123456 123456790.0123456 123456788.0123456 123456789.0123456789 123456790.0123456789 123456788.0123456789 1234567890.123456 1234567891.123456 1234567889.123456 -1234567890.1234567890 1234567891.1234567890 1234567889.1234567890 +1234567890.123456789 1234567891.123456789 1234567889.123456789 PREHOOK: query: SELECT dec, dec * 2, dec / 3 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -242,13 +242,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0.0000000000 0 -0.0000000000 0.0000000000 0 -0.0000000000 0.0000000000 0 -0.0000000000 0.0000000000 0 0 0 0 -0.1234567890 0.2469135780 0.041152263 -0.1234567890 0.2469135780 0.041152263 +0 0 0 +0 0 0 +0 0 0 +0 0 0 +0.123456789 0.246913578 0.041152263 +0.123456789 0.246913578 0.041152263 1.2345678901 2.4691357802 0.411522630033 1.2345678901 2.4691357802 0.411522630033 1.2345678901 2.4691357802 0.411522630033 @@ -258,9 +258,9 @@ NULL NULL NULL 123.4567890123 246.9135780246 41.1522630041 123.4567890123 246.9135780246 41.1522630041 123.4567890123 246.9135780246 41.1522630041 -1234.5678901235 2469.1357802470 411.522630041167 -1234.5678901235 2469.1357802470 411.522630041167 -1234.5678901235 2469.1357802470 411.522630041167 +1234.5678901235 2469.135780247 411.522630041167 +1234.5678901235 2469.135780247 411.522630041167 +1234.5678901235 2469.135780247 411.522630041167 12345.6789012346 24691.3578024692 4115.226300411533 12345.6789012346 24691.3578024692 4115.226300411533 123456.7890123456 246913.5780246912 41152.2630041152 @@ -272,7 +272,7 @@ NULL NULL NULL 123456789.0123456 246913578.0246912 41152263.0041152 123456789.0123456789 246913578.0246913578 41152263.0041152263 1234567890.123456 2469135780.246912 411522630.041152 -1234567890.1234567890 2469135780.2469135780 411522630.041152263 +1234567890.123456789 2469135780.246913578 411522630.041152263 PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -325,13 +325,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0 -0.0000000000 0 
-0.0000000000 0 -0.0000000000 0 0 0 -0.1234567890 0.013717421 -0.1234567890 0.013717421 +0 0 +0 0 +0 0 +0 0 +0.123456789 0.013717421 +0.123456789 0.013717421 1.2345678901 0.137174210011 1.2345678901 0.137174210011 1.2345678901 0.137174210011 @@ -355,7 +355,7 @@ NULL NULL 123456789.0123456 13717421.001371733333 123456789.0123456789 13717421.0013717421 1234567890.123456 137174210.013717333333 -1234567890.1234567890 137174210.013717421 +1234567890.123456789 137174210.013717421 PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -408,13 +408,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0 -0.0000000000 0 -0.0000000000 0 -0.0000000000 0 0 0 -0.1234567890 0.0045724736667 -0.1234567890 0.0045724736667 +0 0 +0 0 +0 0 +0 0 +0.123456789 0.0045724736667 +0.123456789 0.0045724736667 1.2345678901 0.0457247366704 1.2345678901 0.0457247366704 1.2345678901 0.0457247366704 @@ -438,7 +438,7 @@ NULL NULL 123456789.0123456 4572473.6671239111111 123456789.0123456789 4572473.6671239140333 1234567890.123456 45724736.6712391111111 -1234567890.1234567890 45724736.6712391403333 +1234567890.123456789 45724736.6712391403333 PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -491,13 +491,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0.00000000000000000000 -0.0000000000 0.00000000000000000000 -0.0000000000 0.00000000000000000000 -0.0000000000 0.00000000000000000000 0 0 -0.1234567890 0.01524157875019052100 -0.1234567890 0.01524157875019052100 +0 0 +0 0 +0 0 +0 0 +0.123456789 0.015241578750190521 +0.123456789 0.015241578750190521 1.2345678901 1.52415787526596567801 1.2345678901 1.52415787526596567801 1.2345678901 1.52415787526596567801 @@ -521,7 +521,7 @@ NULL NULL 123456789.0123456 15241578753238817.26870921383936 123456789.0123456789 15241578753238836.75019051998750190521 1234567890.123456 NULL -1234567890.1234567890 NULL +1234567890.123456789 NULL PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION diff --git a/ql/src/test/results/clientpositive/decimal_trailing.q.out b/ql/src/test/results/clientpositive/decimal_trailing.q.out index c6991fd..6cfe282 100644 --- a/ql/src/test/results/clientpositive/decimal_trailing.q.out +++ b/ql/src/test/results/clientpositive/decimal_trailing.q.out @@ -43,13 +43,13 @@ POSTHOOK: Input: default@decimal_trailing 0 0 0 1 0 0 2 NULL NULL -3 1.0000 1.00000000 -4 10.0000 10.00000000 -5 100.0000 100.00000000 -6 1000.0000 1000.00000000 -7 10000.0000 10000.00000000 -8 100000.0000 100000.00000000 -9 NULL 1000000.00000000 +3 1 1 +4 10 10 +5 100 100 +6 1000 1000 +7 10000 10000 +8 100000 100000 +9 NULL 1000000 10 NULL NULL 11 NULL NULL 12 NULL NULL @@ -58,18 +58,18 @@ POSTHOOK: Input: default@decimal_trailing 15 NULL NULL 16 NULL NULL 17 NULL NULL -18 1.0000 1.00000000 -19 10.000 10.0000000 -20 100.00 100.000000 -21 1000.0 1000.00000 -22 100000 10000.0000 -23 0.0000 0.00000000 -24 0.000 0.0000000 -25 0.00 0.000000 -26 0.0 0.00000 -27 0 0.00000 -28 12313.2000 134134.31252500 -29 99999.9990 134134.31242553 +18 1 1 +19 10 10 +20 100 100 +21 1000 1000 +22 100000 10000 +23 0 0 +24 0 0 +25 0 0 +26 0 0 +27 0 0 +28 12313.2 134134.312525 +29 99999.999 134134.31242553 PREHOOK: query: DROP TABLE DECIMAL_TRAILING PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_trailing diff --git 
a/ql/src/test/results/clientpositive/decimal_udf.q.out b/ql/src/test/results/clientpositive/decimal_udf.q.out index 6bd876a..1a30346 100644 --- a/ql/src/test/results/clientpositive/decimal_udf.q.out +++ b/ql/src/test/results/clientpositive/decimal_udf.q.out @@ -57,7 +57,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -8800 NULL -0.0000000000 +0 0 200 20 @@ -76,7 +76,7 @@ NULL -0.6 -0.66 -0.666 -2.0 +2 4 6.28 -2.24 @@ -84,15 +84,15 @@ NULL -2.244 2.24 2.244 -248.00 +248 250.4 -2510.98 6.28 6.28 -6.280 -2.0000000000 --2469135780.2469135780 -2469135780.2469135600 +6.28 +2 +-2469135780.246913578 +2469135780.24691356 PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF @@ -124,7 +124,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 0 NULL -0.0000000000 +0 0 200 20 @@ -143,7 +143,7 @@ NULL -0.3 -0.33 -0.333 -2.0 +2 4 6.14 -2.12 @@ -151,15 +151,15 @@ NULL -12.122 2.12 2.122 -248.00 +248 250.2 -2510.49 6.14 6.14 -7.140 -2.0000000000 --2469135780.1234567890 -2469135780.1234567800 +7.14 +2 +-2469135780.123456789 +2469135780.12345678 PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF @@ -327,42 +327,42 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 0 NULL -0.0000000000 0 0 0 0 -0.0 -0.00 0 0 0 0 -0.0 -0.00 -0.0 -0.00 -0.000 -0.0 -0.00 -0.000 -0.0 0 -0.00 -0.00 -0.00 -0.000 -0.00 -0.000 -0.00 -0.0 -0.00 -0.00 -0.00 -0.000 -0.0000000000 -0.0000000000 -0.0000000000 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF @@ -394,7 +394,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -8800 NULL -0.0000000000 +0 0 0 0 @@ -413,7 +413,7 @@ NULL -0.3 -0.33 -0.333 -0.0 +0 0 0.14 -0.12 @@ -421,15 +421,15 @@ NULL 9.878 0.12 0.122 -0.00 +0 0.2 -0.49 0.14 0.14 --0.860 -0.0000000000 --0.1234567890 -0.1234567800 +-0.86 +0 +-0.123456789 +0.12345678 PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF @@ -597,7 +597,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 19360000 NULL -0.00000000000000000000 +0 0 10000 100 @@ -616,7 +616,7 @@ NULL 0.09 0.1089 0.110889 -1.00 +1 4 9.8596 1.2544 @@ -624,13 +624,13 @@ NULL 1.258884 1.2544 1.258884 -15376.0000 +15376 15675.04 1576255.1401 9.8596 9.8596 -9.859600 -1.00000000000000000000 +9.8596 +1 NULL NULL PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0 @@ -671,7 +671,7 @@ POSTHOOK: Input: default@decimal_udf 200 200 20 20 2 2 -1.0 1 +1 1 2 2 3.14 3 -1.12 -1 @@ -679,15 +679,15 @@ POSTHOOK: Input: default@decimal_udf -1.122 -11 1.12 1 1.122 1 -124.00 124 +124 124 125.2 125 -1255.49 -1255 3.14 3 3.14 3 -3.140 4 -1.0000000000 1 --1234567890.1234567890 -1234567890 -1234567890.1234567800 1234567890 +3.14 4 +1 1 +-1234567890.123456789 -1234567890 +1234567890.12345678 1234567890 PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF @@ -719,26 +719,26 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -19360000 NULL -0.0000000000 +0 0 10000 100 1 -0.0 -0.00 
+0 +0 40000 400 4 0 -0.0 -0.00 -0.0 -0.00 -0.000 -0.0 -0.00 -0.000 -1.0 +0 +0 +0 +0 +0 +0 +0 +0 +1 4 9.42 1.12 @@ -746,15 +746,15 @@ NULL 12.342 1.12 1.122 -15376.00 -15650.0 +15376 +15650 1575639.95 9.42 9.42 -12.560 -1.0000000000 -1524157875171467887.5019052100 -1524157875171467876.3907942000 +12.56 +1 +1524157875171467887.50190521 +1524157875171467876.3907942 PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF @@ -1078,7 +1078,7 @@ POSTHOOK: Input: default@decimal_udf 0.785 1 1.0000000001 -1.000000000099999992710 +1.00000000009999999271 PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 @@ -1235,7 +1235,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 4400 NULL -0.0000000000 +0 0 100 10 @@ -1254,7 +1254,7 @@ NULL 0.3 0.33 0.333 -1.0 +1 2 3.14 1.12 @@ -1262,15 +1262,15 @@ NULL 1.122 1.12 1.122 -124.00 +124 125.2 1255.49 3.14 3.14 -3.140 -1.0000000000 -1234567890.1234567890 -1234567890.1234567800 +3.14 +1 +1234567890.123456789 +1234567890.12345678 PREHOOK: query: -- avg EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value PREHOOK: type: QUERY @@ -1359,23 +1359,23 @@ POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DE POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --1234567890 -1234567890.123456789 -1234567890.123456789 -1234567890.1234567890 +-1234567890 -1234567890.123456789 -1234567890.123456789 -1234567890.123456789 -1255 -1255.49 -1255.49 -1255.49 -11 -1.122 -1.122 -1.122 -1 -1.12 -1.12 -2.24 -0 0.02538461538461538461538 0.02538461538462 0.3300000000 -1 1.0484 1.0484 5.2420000000 +0 0.02538461538461538461538 0.02538461538462 0.33 +1 1.0484 1.0484 5.242 2 2 2 4 3 3.14 3.14 9.42 -4 3.14 3.14 3.140 +4 3.14 3.14 3.14 10 10 10 10 20 20 20 20 100 100 100 100 -124 124 124 124.00 +124 124 124 124 125 125.2 125.2 125.2 200 200 200 200 4400 -4400 -4400 -4400 -1234567890 1234567890.12345678 1234567890.12345678 1234567890.1234567800 +1234567890 1234567890.12345678 1234567890.12345678 1234567890.12345678 PREHOOK: query: -- negative EXPLAIN SELECT -key FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -1409,7 +1409,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 4400 NULL -0.0000000000 +0 0 -100 -10 @@ -1428,7 +1428,7 @@ NULL 0.3 0.33 0.333 --1.0 +-1 -2 -3.14 1.12 @@ -1436,15 +1436,15 @@ NULL 1.122 -1.12 -1.122 --124.00 +-124 -125.2 1255.49 -3.14 -3.14 --3.140 --1.0000000000 -1234567890.1234567890 --1234567890.1234567800 +-3.14 +-1 +1234567890.123456789 +-1234567890.12345678 PREHOOK: query: -- positive EXPLAIN SELECT +key FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -1478,7 +1478,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -4400 NULL -0.0000000000 +0 0 100 10 @@ -1497,7 +1497,7 @@ NULL -0.3 -0.33 -0.333 -1.0 +1 2 3.14 -1.12 @@ -1505,15 +1505,15 @@ NULL -1.122 1.12 1.122 -124.00 +124 125.2 -1255.49 3.14 3.14 -3.140 -1.0000000000 --1234567890.1234567890 -1234567890.1234567800 +3.14 +1 +-1234567890.123456789 +1234567890.12345678 PREHOOK: query: -- ceiling EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -1683,42 +1683,42 @@ POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF POSTHOOK: type: QUERY 
POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --4400.00 +-4400 NULL -0.00 -0.00 -100.00 -10.00 -1.00 -0.10 +0 +0 +100 +10 +1 +0.1 0.01 -200.00 -20.00 -2.00 -0.00 -0.20 +200 +20 +2 +0 +0.2 0.02 -0.30 +0.3 0.33 0.33 --0.30 +-0.3 -0.33 -0.33 -1.00 -2.00 +1 +2 3.14 -1.12 -1.12 -1.12 1.12 1.12 -124.00 -125.20 +124 +125.2 -1255.49 3.14 3.14 3.14 -1.00 +1 -1234567890.12 1234567890.12 PREHOOK: query: -- power @@ -1827,38 +1827,38 @@ NULL NULL 1 1 -0.0 -0.00 -0.000 +0 +0 +0 1 1 0 NULL -0.0 -0.00 -0.10 -0.010 -0.0010 -0.10 -0.010 -0.0010 -0.0 0 -1.00 +0 +0.1 +0.01 +0.001 +0.1 +0.01 +0.001 +0 +0 +1 -0.12 -0.12 -0.122 0.44 0.439 -1.00 -1.0 +1 +1 -626.745 -1.00 -1.00 -1.000 -0.0000000000 +1 +1 +1 +0 -617283944.0617283945 -1.0000000000 +1 PREHOOK: query: -- stddev, var EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value PREHOOK: type: QUERY @@ -2150,7 +2150,7 @@ POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --1234567890.1234567890 +-1234567890.123456789 PREHOOK: query: -- max EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -2213,7 +2213,7 @@ POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -1234567890.1234567800 +1234567890.12345678 PREHOOK: query: -- count EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out index 4dcbb0f..b21cb11 100644 --- a/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/dynpart_sort_opt_vectorization.q.out @@ -880,7 +880,7 @@ Partition Parameters: numFiles 2 numRows 32 rawDataSize 640 - totalSize 1348 + totalSize 1352 #### A masked pattern was here #### # Storage Information @@ -924,7 +924,7 @@ Partition Parameters: numFiles 2 numRows 6 rawDataSize 120 - totalSize 1050 + totalSize 1054 #### A masked pattern was here #### # Storage Information @@ -968,7 +968,7 @@ Partition Parameters: numFiles 2 numRows 14 rawDataSize 280 - totalSize 1166 + totalSize 1170 #### A masked pattern was here #### # Storage Information @@ -1012,7 +1012,7 @@ Partition Parameters: numFiles 2 numRows 6 rawDataSize 120 - totalSize 1050 + totalSize 1054 #### A masked pattern was here #### # Storage Information @@ -1055,7 +1055,7 @@ Partition Parameters: numFiles 8 numRows 32 rawDataSize 640 - totalSize 4340 + totalSize 4356 #### A masked pattern was here #### # Storage Information @@ -1098,7 +1098,7 @@ Partition Parameters: numFiles 8 numRows 6 rawDataSize 120 - totalSize 2094 + totalSize 2110 #### A masked pattern was here #### # Storage Information @@ -1141,7 +1141,7 @@ Partition Parameters: numFiles 8 numRows 32 rawDataSize 640 - totalSize 4326 + totalSize 4342 #### A masked pattern was here #### # Storage Information @@ -1184,7 +1184,7 @@ Partition Parameters: numFiles 8 numRows 6 rawDataSize 120 - totalSize 2094 + totalSize 2110 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out index 9b57bbb..5ee36f7 100644 --- a/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out +++ b/ql/src/test/results/clientpositive/dynpart_sort_optimization2.q.out @@ 
-1183,7 +1183,7 @@ Partition Parameters: numFiles 1 numRows 11 rawDataSize 88 - totalSize 417 + totalSize 419 #### A masked pattern was here #### # Storage Information @@ -1245,7 +1245,7 @@ Partition Parameters: numFiles 1 numRows 13 rawDataSize 104 - totalSize 440 + totalSize 442 #### A masked pattern was here #### # Storage Information @@ -1404,7 +1404,7 @@ Partition Parameters: numFiles 1 numRows 11 rawDataSize 88 - totalSize 417 + totalSize 419 #### A masked pattern was here #### # Storage Information @@ -1466,7 +1466,7 @@ Partition Parameters: numFiles 1 numRows 13 rawDataSize 104 - totalSize 440 + totalSize 442 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out index 29cfefa..d0057dd 100644 --- a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out +++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out @@ -136,7 +136,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 343 + totalSize 345 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -179,7 +179,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 361 + totalSize 363 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -269,7 +269,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 343 + totalSize 345 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -312,7 +312,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 361 + totalSize 363 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -469,7 +469,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 258 + totalSize 260 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -513,7 +513,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 257 + totalSize 259 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -557,7 +557,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 245 + totalSize 247 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -601,7 +601,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 245 + totalSize 247 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -688,7 +688,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 
serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 258 + totalSize 260 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -732,7 +732,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 257 + totalSize 259 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -776,7 +776,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 245 + totalSize 247 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -820,7 +820,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 245 + totalSize 247 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out index db5c77c..412e6d6 100644 --- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out +++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out @@ -153,7 +153,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 343 + totalSize 345 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -196,7 +196,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 362 + totalSize 364 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -239,7 +239,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 386 + totalSize 388 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -282,7 +282,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 388 + totalSize 390 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -372,7 +372,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 343 + totalSize 345 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -415,7 +415,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 362 + totalSize 364 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -458,7 +458,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 386 + totalSize 388 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -501,7 
+501,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 388 + totalSize 390 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -604,7 +604,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 343 + totalSize 345 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -647,7 +647,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 362 + totalSize 364 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -690,7 +690,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 386 + totalSize 388 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -733,7 +733,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 388 + totalSize 390 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -819,7 +819,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 343 + totalSize 345 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -862,7 +862,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 362 + totalSize 364 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -905,7 +905,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 386 + totalSize 388 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -948,7 +948,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 388 + totalSize 390 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1106,7 +1106,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 262 + totalSize 264 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1150,7 +1150,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 276 + totalSize 278 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1194,7 +1194,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 277 + totalSize 279 #### A 
masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1238,7 +1238,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 258 + totalSize 260 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1282,7 +1282,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 255 + totalSize 257 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1326,7 +1326,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 265 + totalSize 267 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1370,7 +1370,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 274 + totalSize 276 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1414,7 +1414,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 245 + totalSize 247 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1458,7 +1458,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 245 + totalSize 247 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1502,7 +1502,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 277 + totalSize 279 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1546,7 +1546,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 270 + totalSize 272 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1633,7 +1633,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 262 + totalSize 264 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1677,7 +1677,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 276 + totalSize 278 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1721,7 +1721,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 277 + totalSize 279 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1765,7 +1765,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 258 + 
totalSize 260 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1809,7 +1809,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 255 + totalSize 257 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1853,7 +1853,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 265 + totalSize 267 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1897,7 +1897,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 274 + totalSize 276 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1941,7 +1941,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 245 + totalSize 247 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1985,7 +1985,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 245 + totalSize 247 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2029,7 +2029,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 277 + totalSize 279 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2073,7 +2073,7 @@ STAGE PLANS: serialization.ddl struct loc_orc_2d { string state, i32 locid} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 270 + totalSize 272 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde diff --git a/ql/src/test/results/clientpositive/gby_star.q.out b/ql/src/test/results/clientpositive/gby_star.q.out new file mode 100644 index 0000000..3021171 --- /dev/null +++ b/ql/src/test/results/clientpositive/gby_star.q.out @@ -0,0 +1,370 @@ +PREHOOK: query: explain +select *, sum(key) from src group by key, value limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select *, sum(key) from src group by key, value limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(key) + keys: key (type: string), value (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 500 Data size: 5312 
Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: double) + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: double) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: select *, sum(key) from src group by key, value limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select *, sum(key) from src group by key, value limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 0.0 +10 val_10 10.0 +100 val_100 200.0 +103 val_103 206.0 +104 val_104 208.0 +105 val_105 105.0 +11 val_11 11.0 +111 val_111 111.0 +113 val_113 226.0 +114 val_114 114.0 +PREHOOK: query: explain +select *, sum(key) from src where key < 100 group by key, value limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select *, sum(key) from src where key < 100 group by key, value limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key < 100) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(key) + keys: key (type: string), value (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + value expressions: _col2 (type: double) + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: double) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 
Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: select *, sum(key) from src where key < 100 group by key, value limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select *, sum(key) from src where key < 100 group by key, value limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 0.0 +10 val_10 10.0 +11 val_11 11.0 +12 val_12 24.0 +15 val_15 30.0 +17 val_17 17.0 +18 val_18 36.0 +19 val_19 19.0 +2 val_2 2.0 +20 val_20 20.0 +PREHOOK: query: explain +select *, sum(key) from (select key from src where key < 100) a group by key limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select *, sum(key) from (select key from src where key < 100) a group by key limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key < 100) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(_col0) + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: double) + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: double) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: select *, sum(key) from (select key from src where key < 100) a group by key limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select *, sum(key) from (select key from src where key < 100) a group by key limit 10 +POSTHOOK: 
type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 0.0 +10 10.0 +11 11.0 +12 24.0 +15 30.0 +17 17.0 +18 36.0 +19 19.0 +2 2.0 +20 20.0 +PREHOOK: query: explain +select a.*, sum(src.key) from (select key from src where key < 100) a +inner join src on a.key = src.key group by a.key limit 10 +PREHOOK: type: QUERY +POSTHOOK: query: explain +select a.*, sum(src.key) from (select key from src where key < 100) a +inner join src on a.key = src.key group by a.key limit 10 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (key < 100) (type: boolean) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 166 Data size: 1763 Basic stats: COMPLETE Column stats: NONE + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} + 1 {KEY.reducesinkkey0} + outputColumnNames: _col0, _col1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: sum(_col1) + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: double) + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: double) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 137 Data size: 1455 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 10 + 
Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 10 + Processor Tree: + ListSink + +PREHOOK: query: select a.*, sum(src.key) from (select key from src where key < 100) a +inner join src on a.key = src.key group by a.key limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select a.*, sum(src.key) from (select key from src where key < 100) a +inner join src on a.key = src.key group by a.key limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 0.0 +10 10.0 +11 11.0 +12 48.0 +15 60.0 +17 17.0 +18 72.0 +19 19.0 +2 2.0 +20 20.0 diff --git a/ql/src/test/results/clientpositive/groupby_ppd.q.out b/ql/src/test/results/clientpositive/groupby_ppd.q.out index 403ebd3..8b8a96e 100644 --- a/ql/src/test/results/clientpositive/groupby_ppd.q.out +++ b/ql/src/test/results/clientpositive/groupby_ppd.q.out @@ -79,7 +79,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: 1 (type: int), _col1 (type: int) + expressions: _col1 (type: int), 1 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/index_skewtable.q.out b/ql/src/test/results/clientpositive/index_skewtable.q.out new file mode 100644 index 0000000..02fd1f4 --- /dev/null +++ b/ql/src/test/results/clientpositive/index_skewtable.q.out @@ -0,0 +1,216 @@ +PREHOOK: query: -- Test creating an index on skewed table + +-- Create a skew table +CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@kv +POSTHOOK: query: -- Test creating an index on skewed table + +-- Create a skew table +CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@kv +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE kv +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@kv +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE kv +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@kv +PREHOOK: query: -- Create and build an index +CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: type: CREATEINDEX +PREHOOK: Input: default@kv +POSTHOOK: query: -- Create and build an index +CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: type: CREATEINDEX +POSTHOOK: Input: default@kv +POSTHOOK: Output: default@default__kv_kv_index__ +PREHOOK: query: DESCRIBE FORMATTED default__kv_kv_index__ +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@default__kv_kv_index__ +POSTHOOK: query: DESCRIBE FORMATTED default__kv_kv_index__ +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@default__kv_kv_index__ +# col_name data_type comment + 
+value string +_bucketname string +_offsets array + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: INDEX_TABLE +Table Parameters: +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [Order(col:value, order:1)] +PREHOOK: query: ALTER INDEX kv_index ON kv REBUILD +PREHOOK: type: ALTERINDEX_REBUILD +PREHOOK: Input: default@kv +PREHOOK: Output: default@default__kv_kv_index__ +POSTHOOK: query: ALTER INDEX kv_index ON kv REBUILD +POSTHOOK: type: ALTERINDEX_REBUILD +POSTHOOK: Input: default@kv +POSTHOOK: Output: default@default__kv_kv_index__ +POSTHOOK: Lineage: default__kv_kv_index__._bucketname SIMPLE [(kv)kv.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: default__kv_kv_index__._offsets EXPRESSION [(kv)kv.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] +POSTHOOK: Lineage: default__kv_kv_index__.value SIMPLE [(kv)kv.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: -- Run a query that uses the index +EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value +PREHOOK: type: QUERY +POSTHOOK: query: -- Run a query that uses the index +EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-3 is a root stage + Stage-8 depends on stages: Stage-3 , consists of Stage-5, Stage-4, Stage-6 + Stage-5 + Stage-2 depends on stages: Stage-5, Stage-4, Stage-7 + Stage-1 depends on stages: Stage-2 + Stage-4 + Stage-6 + Stage-7 depends on stages: Stage-6 + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: default.default__kv_kv_index__ + filterExpr: (value > '15') (type: boolean) + Filter Operator + predicate: (value > '15') (type: boolean) + Select Operator + expressions: _bucketname (type: string), _offsets (type: array) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-8 + Conditional Operator + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-2 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: kv + filterExpr: (value > '15') (type: boolean) + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: (value > '15') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col1 (type: string) + sort order: + + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col0 (type: string) + Reduce Operator Tree: + Select Operator + expressions: VALUE._col0 
(type: string), KEY.reducesinkkey0 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-6 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-7 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT * FROM kv WHERE value > '15' ORDER BY value +PREHOOK: type: QUERY +PREHOOK: Input: default@default__kv_kv_index__ +PREHOOK: Input: default@kv +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM kv WHERE value > '15' ORDER BY value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@default__kv_kv_index__ +POSTHOOK: Input: default@kv +#### A masked pattern was here #### +8 18 +8 18 +2 22 +PREHOOK: query: DROP INDEX kv_index ON kv +PREHOOK: type: DROPINDEX +PREHOOK: Input: default@kv +POSTHOOK: query: DROP INDEX kv_index ON kv +POSTHOOK: type: DROPINDEX +POSTHOOK: Input: default@kv +PREHOOK: query: DROP TABLE kv +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@kv +PREHOOK: Output: default@kv +POSTHOOK: query: DROP TABLE kv +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@kv +POSTHOOK: Output: default@kv diff --git a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out new file mode 100644 index 0000000..8d36ae4 --- /dev/null +++ b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out @@ -0,0 +1,66 @@ +PREHOOK: query: -- This test checks that selecting from an acid table and inserting into a non-acid table works. +create table sample_06(name varchar(50), age int, gpa decimal(3, 2)) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@sample_06 +POSTHOOK: query: -- This test checks that selecting from an acid table and inserting into a non-acid table works. 
+create table sample_06(name varchar(50), age int, gpa decimal(3, 2)) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@sample_06 +PREHOOK: query: insert into table sample_06 values ('aaa', 35, 3.00), ('bbb', 32, 3.00), ('ccc', 32, 3.00), ('ddd', 35, 3.00), ('eee', 32, 3.00) +PREHOOK: type: QUERY +PREHOOK: Input: default@values__tmp__table__1 +PREHOOK: Output: default@sample_06 +POSTHOOK: query: insert into table sample_06 values ('aaa', 35, 3.00), ('bbb', 32, 3.00), ('ccc', 32, 3.00), ('ddd', 35, 3.00), ('eee', 32, 3.00) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@values__tmp__table__1 +POSTHOOK: Output: default@sample_06 +POSTHOOK: Lineage: sample_06.age EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] +POSTHOOK: Lineage: sample_06.gpa EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col3, type:string, comment:), ] +POSTHOOK: Lineage: sample_06.name EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from sample_06 where gpa = 3.00 +PREHOOK: type: QUERY +PREHOOK: Input: default@sample_06 +#### A masked pattern was here #### +POSTHOOK: query: select * from sample_06 where gpa = 3.00 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@sample_06 +#### A masked pattern was here #### +eee 32 3 +ccc 32 3 +bbb 32 3 +ddd 35 3 +aaa 35 3 +PREHOOK: query: create table tab1 (name varchar(50), age int, gpa decimal(3, 2)) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab1 +POSTHOOK: query: create table tab1 (name varchar(50), age int, gpa decimal(3, 2)) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab1 +PREHOOK: query: insert into table tab1 select * from sample_06 where gpa = 3.00 +PREHOOK: type: QUERY +PREHOOK: Input: default@sample_06 +PREHOOK: Output: default@tab1 +POSTHOOK: query: insert into table tab1 select * from sample_06 where gpa = 3.00 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@sample_06 +POSTHOOK: Output: default@tab1 +POSTHOOK: Lineage: tab1.age SIMPLE [(sample_06)sample_06.FieldSchema(name:age, type:int, comment:null), ] +POSTHOOK: Lineage: tab1.gpa SIMPLE [(sample_06)sample_06.FieldSchema(name:gpa, type:decimal(3,2), comment:null), ] +POSTHOOK: Lineage: tab1.name SIMPLE [(sample_06)sample_06.FieldSchema(name:name, type:varchar(50), comment:null), ] +PREHOOK: query: select * from tab1 +PREHOOK: type: QUERY +PREHOOK: Input: default@tab1 +#### A masked pattern was here #### +POSTHOOK: query: select * from tab1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab1 +#### A masked pattern was here #### +eee 32 3 +ccc 32 3 +bbb 32 3 +ddd 35 3 +aaa 35 3 diff --git a/ql/src/test/results/clientpositive/orc_analyze.q.out b/ql/src/test/results/clientpositive/orc_analyze.q.out index e718b29..b422db5 100644 --- a/ql/src/test/results/clientpositive/orc_analyze.q.out +++ b/ql/src/test/results/clientpositive/orc_analyze.q.out @@ -107,7 +107,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3121 + totalSize 3123 #### A masked pattern was here #### # Storage Information @@ -197,7 +197,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3121 + totalSize 3123 #### A masked pattern was here #### # Storage Information @@ -313,7 +313,7 @@ 
Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2024 + totalSize 2026 #### A masked pattern was here #### # Storage Information @@ -358,7 +358,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2043 + totalSize 2045 #### A masked pattern was here #### # Storage Information @@ -460,7 +460,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2024 + totalSize 2026 #### A masked pattern was here #### # Storage Information @@ -505,7 +505,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2043 + totalSize 2045 #### A masked pattern was here #### # Storage Information @@ -627,7 +627,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2024 + totalSize 2026 #### A masked pattern was here #### # Storage Information @@ -672,7 +672,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2043 + totalSize 2045 #### A masked pattern was here #### # Storage Information @@ -780,7 +780,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2024 + totalSize 2026 #### A masked pattern was here #### # Storage Information @@ -825,7 +825,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2043 + totalSize 2045 #### A masked pattern was here #### # Storage Information @@ -992,7 +992,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2024 + totalSize 2026 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out index fcd84d1..7a9c772 100644 --- a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out +++ b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out @@ -138,6 +138,19 @@ POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchem POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5b +POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] PREHOOK: query: -- 5 files total analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY @@ -148,7 +161,7 @@ analyze table orc_merge5b compute 
statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -Found 5 items +Found 6 items #### A masked pattern was here #### PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY @@ -163,6 +176,8 @@ POSTHOOK: Input: default@orc_merge5b 13 bar 80.0 2 1969-12-31 16:00:05 13 bar 80.0 2 1969-12-31 16:00:05 13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 +2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 @@ -173,6 +188,7 @@ POSTHOOK: Input: default@orc_merge5b 5 eat 0.8 6 1969-12-31 16:00:20 5 eat 0.8 6 1969-12-31 16:00:20 5 eat 0.8 6 1969-12-31 16:00:20 +5 eat 0.8 6 1969-12-31 16:00:20 PREHOOK: query: alter table orc_merge5b concatenate PREHOOK: type: ALTER_TABLE_MERGE PREHOOK: Input: default@orc_merge5b @@ -191,7 +207,7 @@ analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -Found 3 items +Found 4 items #### A masked pattern was here #### PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY @@ -206,11 +222,14 @@ POSTHOOK: Input: default@orc_merge5b 13 bar 80.0 2 1969-12-31 16:00:05 13 bar 80.0 2 1969-12-31 16:00:05 13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 +2 foo 0.8 1 1969-12-31 16:00:00 +5 eat 0.8 6 1969-12-31 16:00:20 5 eat 0.8 6 1969-12-31 16:00:20 5 eat 0.8 6 1969-12-31 16:00:20 5 eat 0.8 6 1969-12-31 16:00:20 diff --git a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out index 359d4ac..9a7b2b7 100644 --- a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out +++ b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out @@ -259,7 +259,7 @@ POSTHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_pred #### A masked pattern was here #### -124 336 65664 4294967435 74.72 42.47 true bob davidson 2013-03-01 09:11:58.703302 45.40 yard duty +124 336 65664 4294967435 74.72 42.47 true bob davidson 2013-03-01 09:11:58.703302 45.4 yard duty PREHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@orc_pred @@ -268,7 +268,7 @@ POSTHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_pred #### A masked pattern was here #### -124 336 65664 4294967435 74.72 42.47 true bob davidson 2013-03-01 09:11:58.703302 45.40 yard duty +124 336 65664 4294967435 74.72 42.47 true bob davidson 2013-03-01 09:11:58.703302 45.4 yard duty PREHOOK: query: SELECT SUM(HASH(t)) FROM orc_pred WHERE t IS NOT NULL AND t < 0 diff --git a/ql/src/test/results/clientpositive/parquet_create.q.out b/ql/src/test/results/clientpositive/parquet_create.q.out index 2a94693..16a2cfd 100644 --- a/ql/src/test/results/clientpositive/parquet_create.q.out +++ b/ql/src/test/results/clientpositive/parquet_create.q.out @@ -118,17 +118,6 @@ POSTHOOK: Lineage: parquet_create.lst SIMPLE [(parquet_create_staging)parquet_cr POSTHOOK: Lineage: parquet_create.mp SIMPLE [(parquet_create_staging)parquet_create_staging.FieldSchema(name:mp, type:map, comment:null), ] POSTHOOK: Lineage: parquet_create.str SIMPLE 
[(parquet_create_staging)parquet_create_staging.FieldSchema(name:str, type:string, comment:null), ] POSTHOOK: Lineage: parquet_create.strct SIMPLE [(parquet_create_staging)parquet_create_staging.FieldSchema(name:strct, type:struct, comment:null), ] -PREHOOK: query: SELECT * FROM parquet_create group by id -PREHOOK: type: QUERY -PREHOOK: Input: default@parquet_create -#### A masked pattern was here #### -POSTHOOK: query: SELECT * FROM parquet_create group by id -POSTHOOK: type: QUERY -POSTHOOK: Input: default@parquet_create -#### A masked pattern was here #### -1 -2 -3 PREHOOK: query: SELECT id, count(0) FROM parquet_create group by id PREHOOK: type: QUERY PREHOOK: Input: default@parquet_create diff --git a/ql/src/test/results/clientpositive/parquet_decimal.q.out b/ql/src/test/results/clientpositive/parquet_decimal.q.out index 5767c57..cd87b92 100644 --- a/ql/src/test/results/clientpositive/parquet_decimal.q.out +++ b/ql/src/test/results/clientpositive/parquet_decimal.q.out @@ -63,9 +63,9 @@ Mary 4.33 Cluck 5.96 Tom -12.25 Mary 33.33 -Tom 19.00 -Beck 0.00 -Beck 79.90 +Tom 19 +Beck 0 +Beck 79.9 PREHOOK: query: TRUNCATE TABLE parq_dec PREHOOK: type: TRUNCATETABLE PREHOOK: Output: default@parq_dec @@ -140,12 +140,12 @@ POSTHOOK: Input: default@parq_dec1 77.3 55.7 4.3 -6.0 +6 12.3 33.3 0.2 3.2 -8.0 +8 PREHOOK: query: DROP TABLE dec PREHOOK: type: DROPTABLE PREHOOK: Input: default@dec diff --git a/ql/src/test/results/clientpositive/parquet_decimal1.q.out b/ql/src/test/results/clientpositive/parquet_decimal1.q.out index 0f71b1e..bd146f8 100644 --- a/ql/src/test/results/clientpositive/parquet_decimal1.q.out +++ b/ql/src/test/results/clientpositive/parquet_decimal1.q.out @@ -28,7 +28,7 @@ POSTHOOK: query: SELECT * FROM dec_comp POSTHOOK: type: QUERY POSTHOOK: Input: default@dec_comp #### A masked pattern was here #### -[3.14,6.28,7.30] {"k1":92.77,"k2":29.39} {"i":5,"d":9.03} +[3.14,6.28,7.3] {"k1":92.77,"k2":29.39} {"i":5,"d":9.03} [12.4,1.33,0.34] {"k2":2.79,"k4":29.09} {"i":11,"d":0.03} PREHOOK: query: DROP TABLE IF EXISTS parq_dec_comp PREHOOK: type: DROPTABLE @@ -72,8 +72,8 @@ POSTHOOK: query: SELECT * FROM parq_dec_comp POSTHOOK: type: QUERY POSTHOOK: Input: default@parq_dec_comp #### A masked pattern was here #### -[3.14,6.28,7.30] {"k2":29.39,"k1":92.77} {"i":5,"d":9.03} -[12.40,1.33,0.34] {"k4":29.09,"k2":2.79} {"i":11,"d":0.03} +[3.14,6.28,7.3] {"k2":29.39,"k1":92.77} {"i":5,"d":9.03} +[12.4,1.33,0.34] {"k4":29.09,"k2":2.79} {"i":11,"d":0.03} PREHOOK: query: DROP TABLE dec_comp PREHOOK: type: DROPTABLE PREHOOK: Input: default@dec_comp diff --git a/ql/src/test/results/clientpositive/partition_multilevels.q.out b/ql/src/test/results/clientpositive/partition_multilevels.q.out new file mode 100644 index 0000000..107522d --- /dev/null +++ b/ql/src/test/results/clientpositive/partition_multilevels.q.out @@ -0,0 +1,1624 @@ +PREHOOK: query: create table partition_test_multilevel (key string, value string) partitioned by (level1 string, level2 string, level3 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@partition_test_multilevel +POSTHOOK: query: create table partition_test_multilevel (key string, value string) partitioned by (level1 string, level2 string, level3 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partition_test_multilevel +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='11') select key, value from srcpart tablesample 
(11 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='11') select key, value from srcpart tablesample (11 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=111,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=111,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='22') select key, value from srcpart tablesample (12 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='22') select key, value from srcpart tablesample (12 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=111,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=111,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='33') select key, value from srcpart tablesample (13 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='33') select key, value from srcpart tablesample (13 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: 
default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=111,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=111,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='44') select key, value from srcpart tablesample (14 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='111', level3='44') select key, value from srcpart tablesample (14 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=111,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=111,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='11') select key, value from srcpart tablesample (15 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='11') select key, value from srcpart tablesample (15 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=222,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=222,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='22') select key, value from srcpart tablesample (16 rows) +PREHOOK: type: QUERY +PREHOOK: Input: 
default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='22') select key, value from srcpart tablesample (16 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=222,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=222,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='33') select key, value from srcpart tablesample (17 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='33') select key, value from srcpart tablesample (17 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=222,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=222,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='44') select key, value from srcpart tablesample (18 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='222', level3='44') select key, value from srcpart tablesample (18 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: 
default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=222,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=222,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='11') select key, value from srcpart tablesample (19 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='11') select key, value from srcpart tablesample (19 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=333,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=333,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='22') select key, value from srcpart tablesample (20 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='22') select key, value from srcpart tablesample (20 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=333,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=333,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='33') select key, value from srcpart tablesample (21 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='33') select key, value from srcpart tablesample (21 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=333,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=333,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='44') select key, value from srcpart tablesample (22 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='1111', level2='333', level3='44') select key, value from srcpart tablesample (22 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=333,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=1111,level2=333,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='11') select key, value from srcpart tablesample (11 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='11') select key, value from srcpart tablesample (11 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: 
Output: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=111,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=111,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='22') select key, value from srcpart tablesample (12 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='22') select key, value from srcpart tablesample (12 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=111,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=111,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='33') select key, value from srcpart tablesample (13 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='33') select key, value from srcpart tablesample (13 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=111,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=111,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='44') select key, value from srcpart tablesample (14 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='111', level3='44') select key, value from srcpart tablesample (14 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=111,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=111,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='11') select key, value from srcpart tablesample (15 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='11') select key, value from srcpart tablesample (15 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=222,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=222,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='22') select key, value from srcpart tablesample (16 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='22') select key, value from srcpart tablesample (16 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: 
default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=222,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=222,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='33') select key, value from srcpart tablesample (17 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='33') select key, value from srcpart tablesample (17 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=222,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=222,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='44') select key, value from srcpart tablesample (18 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='222', level3='44') select key, value from srcpart tablesample (18 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=222,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=222,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='11') select key, value from srcpart tablesample (19 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: 
default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='11') select key, value from srcpart tablesample (19 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=333,level3=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=333,level3=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='22') select key, value from srcpart tablesample (20 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='22') select key, value from srcpart tablesample (20 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=333,level3=22).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=333,level3=22).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='33') select key, value from srcpart tablesample (21 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='33') select key, value from srcpart tablesample (21 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: 
default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=333,level3=33).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=333,level3=33).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='44') select key, value from srcpart tablesample (22 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@srcpart +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +PREHOOK: Output: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +POSTHOOK: query: insert overwrite table partition_test_multilevel partition(level1='2222', level2='333', level3='44') select key, value from srcpart tablesample (22 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcpart +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 +POSTHOOK: Output: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=333,level3=44).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_test_multilevel PARTITION(level1=2222,level2=333,level3=44).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: -- beginning level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: -- beginning level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Input: 
default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +2222 111 11 11 +2222 111 22 12 +2222 111 33 13 +2222 111 44 14 +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 222 44 18 +2222 333 11 19 +2222 333 22 20 +2222 333 33 21 +2222 333 44 22 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 >= '2222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 >= '2222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: 
default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +2222 111 11 11 +2222 111 22 12 +2222 111 33 13 +2222 111 44 14 +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 222 44 18 +2222 333 11 19 +2222 333 22 20 +2222 333 33 21 +2222 333 44 22 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 !='2222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 !='2222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +#### A masked pattern was here #### +1111 111 11 11 +1111 111 22 12 +1111 111 33 13 +1111 111 44 14 +1111 222 11 15 +1111 222 22 16 +1111 222 33 17 +1111 222 44 18 +1111 333 11 19 +1111 333 22 20 +1111 333 33 21 +1111 333 44 22 +PREHOOK: query: -- middle level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 = '222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +PREHOOK: Input: 
default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: -- middle level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 = '222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +#### A masked pattern was here #### +1111 222 11 15 +1111 222 22 16 +1111 222 33 17 +1111 222 44 18 +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 222 44 18 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level2 <= '222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level2 <= '222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +POSTHOOK: Input: 
default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +#### A masked pattern was here #### +1111 111 11 11 +1111 111 22 12 +1111 111 33 13 +1111 111 44 14 +1111 222 11 15 +1111 222 22 16 +1111 222 33 17 +1111 222 44 18 +2222 111 11 11 +2222 111 22 12 +2222 111 33 13 +2222 111 44 14 +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 222 44 18 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level2 != '222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level2 != '222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Input: 
default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +1111 111 11 11 +1111 111 22 12 +1111 111 33 13 +1111 111 44 14 +1111 333 11 19 +1111 333 22 20 +1111 333 33 21 +1111 333 44 22 +2222 111 11 11 +2222 111 22 12 +2222 111 33 13 +2222 111 44 14 +2222 333 11 19 +2222 333 22 20 +2222 333 33 21 +2222 333 44 22 +PREHOOK: query: -- ending level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level3 = '22' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +#### A masked pattern was here #### +POSTHOOK: query: -- ending level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level3 = '22' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +#### A masked pattern was here #### +1111 111 22 12 +1111 222 22 16 +1111 333 22 20 +2222 111 22 12 +2222 222 22 16 +2222 333 22 20 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level3 >= '22' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: 
Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level3 >= '22' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +1111 111 22 12 +1111 111 33 13 +1111 111 44 14 +1111 222 22 16 +1111 222 33 17 +1111 222 44 18 +1111 333 22 20 +1111 333 33 21 +1111 333 44 22 +2222 111 22 12 +2222 111 33 13 +2222 111 44 14 +2222 222 22 16 +2222 222 33 17 +2222 222 44 18 +2222 333 22 20 +2222 333 33 21 +2222 333 44 22 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level3 != '22' 
group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level3 != '22' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +1111 111 11 11 +1111 111 33 13 +1111 111 44 14 +1111 222 11 15 +1111 222 33 17 +1111 222 44 18 +1111 333 11 19 
+1111 333 33 21 +1111 333 44 22 +2222 111 11 11 +2222 111 33 13 +2222 111 44 14 +2222 222 11 15 +2222 222 33 17 +2222 222 44 18 +2222 333 11 19 +2222 333 33 21 +2222 333 44 22 +PREHOOK: query: -- two different levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 >= '222' and level3 = '33' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +POSTHOOK: query: -- two different levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 >= '222' and level3 = '33' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +1111 222 33 17 +1111 333 33 21 +2222 222 33 17 +2222 333 33 21 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 <= '1111' and level3 >= '33' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 <= '1111' and level3 >= '33' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +#### A masked pattern was here #### +1111 111 33 13 +1111 111 44 14 +1111 222 33 17 +1111 222 44 18 +1111 333 33 21 +1111 333 44 22 +PREHOOK: query: -- all levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' and level2 >= '222' and level3 <= '33' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: 
default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +POSTHOOK: query: -- all levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' and level2 >= '222' and level3 <= '33' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 333 11 19 +2222 333 22 20 +2222 333 33 21 +PREHOOK: query: -- between +select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +POSTHOOK: query: -- between +select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 333 11 19 +2222 333 22 20 +2222 333 33 21 +PREHOOK: query: explain select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select level1, level2, 
level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: partition_test_multilevel + Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '2222' (type: string), level2 (type: string), level3 (type: string) + outputColumnNames: level1, level2, level3 + Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: level1 (type: string), level2 (type: string), level3 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) + Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE + value expressions: _col3 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- beginning level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +PREHOOK: Input: 
default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: -- beginning level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +2222 111 11 11 +2222 111 22 12 +2222 111 33 13 +2222 111 44 14 +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 222 44 18 +2222 333 11 19 +2222 333 22 20 +2222 333 33 21 +2222 333 44 22 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 >= '2222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 >= '2222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: 
default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +2222 111 11 11 +2222 111 22 12 +2222 111 33 13 +2222 111 44 14 +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 222 44 18 +2222 333 11 19 +2222 333 22 20 +2222 333 33 21 +2222 333 44 22 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 !='2222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 !='2222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +#### A masked pattern was here #### +1111 111 11 11 +1111 111 22 12 +1111 111 33 13 +1111 111 44 14 +1111 222 11 15 +1111 222 22 16 +1111 222 33 17 +1111 222 44 18 +1111 333 11 19 +1111 333 22 20 +1111 333 33 21 +1111 333 44 22 +PREHOOK: query: -- 
middle level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 = '222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: -- middle level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 = '222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +#### A masked pattern was here #### +1111 222 11 15 +1111 222 22 16 +1111 222 33 17 +1111 222 44 18 +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 222 44 18 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level2 <= '222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: 
default@partition_test_multilevel@level1=2222/level2=222/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level2 <= '222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +#### A masked pattern was here #### +1111 111 11 11 +1111 111 22 12 +1111 111 33 13 +1111 111 44 14 +1111 222 11 15 +1111 222 22 16 +1111 222 33 17 +1111 222 44 18 +2222 111 11 11 +2222 111 22 12 +2222 111 33 13 +2222 111 44 14 +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 222 44 18 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level2 != '222' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked 
pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level2 != '222' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +1111 111 11 11 +1111 111 22 12 +1111 111 33 13 +1111 111 44 14 +1111 333 11 19 +1111 333 22 20 +1111 333 33 21 +1111 333 44 22 +2222 111 11 11 +2222 111 22 12 +2222 111 33 13 +2222 111 44 14 +2222 333 11 19 +2222 333 22 20 +2222 333 33 21 +2222 333 44 22 +PREHOOK: query: -- ending level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level3 = '22' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +#### A masked pattern was here #### +POSTHOOK: query: -- ending level partition in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level3 = '22' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +#### A masked pattern was here #### +1111 111 22 12 +1111 222 22 16 +1111 333 22 20 +2222 111 22 12 
+2222 222 22 16 +2222 333 22 20 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level3 >= '22' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level3 >= '22' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A 
masked pattern was here #### +1111 111 22 12 +1111 111 33 13 +1111 111 44 14 +1111 222 22 16 +1111 222 33 17 +1111 222 44 18 +1111 333 22 20 +1111 333 33 21 +1111 333 44 22 +2222 111 22 12 +2222 111 33 13 +2222 111 44 14 +2222 222 22 16 +2222 222 33 17 +2222 222 44 18 +2222 333 22 20 +2222 333 33 21 +2222 333 44 22 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level3 != '22' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level3 != '22' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: 
default@partition_test_multilevel@level1=2222/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=44 +#### A masked pattern was here #### +1111 111 11 11 +1111 111 33 13 +1111 111 44 14 +1111 222 11 15 +1111 222 33 17 +1111 222 44 18 +1111 333 11 19 +1111 333 33 21 +1111 333 44 22 +2222 111 11 11 +2222 111 33 13 +2222 111 44 14 +2222 222 11 15 +2222 222 33 17 +2222 222 44 18 +2222 333 11 19 +2222 333 33 21 +2222 333 44 22 +PREHOOK: query: -- two different levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 >= '222' and level3 = '33' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +POSTHOOK: query: -- two different levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level2 >= '222' and level3 = '33' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +1111 222 33 17 +1111 333 33 21 +2222 222 33 17 +2222 333 33 21 +PREHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 <= '1111' and level3 >= '33' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=44 +#### A masked pattern was here #### +POSTHOOK: query: select level1, level2, level3, count(*) from partition_test_multilevel where level1 <= '1111' and level3 >= '33' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=111/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=222/level3=44 +POSTHOOK: Input: default@partition_test_multilevel@level1=1111/level2=333/level3=33 +POSTHOOK: Input: 
default@partition_test_multilevel@level1=1111/level2=333/level3=44 +#### A masked pattern was here #### +1111 111 33 13 +1111 111 44 14 +1111 222 33 17 +1111 222 44 18 +1111 333 33 21 +1111 333 44 22 +PREHOOK: query: -- all levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' and level2 >= '222' and level3 <= '33' group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +POSTHOOK: query: -- all levels of partitions in predicate +select level1, level2, level3, count(*) from partition_test_multilevel where level1 = '2222' and level2 >= '222' and level3 <= '33' group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 333 11 19 +2222 333 22 20 +2222 333 33 21 +PREHOOK: query: -- between +select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_test_multilevel +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 +PREHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +POSTHOOK: query: -- between +select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_test_multilevel +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=22 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=222/level3=33 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=11 +POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=22 
+POSTHOOK: Input: default@partition_test_multilevel@level1=2222/level2=333/level3=33 +#### A masked pattern was here #### +2222 222 11 15 +2222 222 22 16 +2222 222 33 17 +2222 333 11 19 +2222 333 22 20 +2222 333 33 21 +PREHOOK: query: explain select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3 +PREHOOK: type: QUERY +POSTHOOK: query: explain select level1, level2, level3, count(*) from partition_test_multilevel where (level1 = '2222') and (level2 between '222' and '333') and (level3 between '11' and '33') group by level1, level2, level3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: partition_test_multilevel + Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: '2222' (type: string), level2 (type: string), level3 (type: string) + outputColumnNames: level1, level2, level3 + Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: level1 (type: string), level2 (type: string), level3 (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) + Statistics: Num rows: 108 Data size: 1146 Basic stats: COMPLETE Column stats: NONE + value expressions: _col3 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 54 Data size: 573 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git a/ql/src/test/results/clientpositive/partition_timestamp.q.out b/ql/src/test/results/clientpositive/partition_timestamp.q.out new file mode 100644 index 0000000..bc6ab10 --- /dev/null +++ b/ql/src/test/results/clientpositive/partition_timestamp.q.out @@ -0,0 +1,312 @@ +PREHOOK: query: drop table partition_timestamp_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table partition_timestamp_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table partition_timestamp_1 (key string, value string) partitioned by (dt timestamp, region string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@partition_timestamp_1 +POSTHOOK: query: create table partition_timestamp_1 (key string, 
value string) partitioned by (dt timestamp, region string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partition_timestamp_1 +PREHOOK: query: insert overwrite table partition_timestamp_1 partition(dt='2000-01-01 01:00:00', region= '1') + select * from src tablesample (10 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +POSTHOOK: query: insert overwrite table partition_timestamp_1 partition(dt='2000-01-01 01:00:00', region= '1') + select * from src tablesample (10 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +POSTHOOK: Lineage: partition_timestamp_1 PARTITION(dt=2000-01-01 01:00:00,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp_1 PARTITION(dt=2000-01-01 01:00:00,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_timestamp_1 partition(dt='2000-01-01 02:00:00', region= '2') + select * from src tablesample (5 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_timestamp_1@dt=2000-01-01 02%3A00%3A00/region=2 +POSTHOOK: query: insert overwrite table partition_timestamp_1 partition(dt='2000-01-01 02:00:00', region= '2') + select * from src tablesample (5 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_timestamp_1@dt=2000-01-01 02%3A00%3A00/region=2 +POSTHOOK: Lineage: partition_timestamp_1 PARTITION(dt=2000-01-01 02:00:00,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp_1 PARTITION(dt=2000-01-01 02:00:00,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_timestamp_1 partition(dt='2001-01-01 01:00:00', region= '2020-20-20') + select * from src tablesample (5 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_timestamp_1@dt=2001-01-01 01%3A00%3A00/region=2020-20-20 +POSTHOOK: query: insert overwrite table partition_timestamp_1 partition(dt='2001-01-01 01:00:00', region= '2020-20-20') + select * from src tablesample (5 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_timestamp_1@dt=2001-01-01 01%3A00%3A00/region=2020-20-20 +POSTHOOK: Lineage: partition_timestamp_1 PARTITION(dt=2001-01-01 01:00:00,region=2020-20-20).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp_1 PARTITION(dt=2001-01-01 01:00:00,region=2020-20-20).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_timestamp_1 partition(dt='2001-01-01 02:00:00', region= '1') + select * from src tablesample (20 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +POSTHOOK: query: insert overwrite table partition_timestamp_1 partition(dt='2001-01-01 02:00:00', region= '1') + select * from src tablesample (20 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +POSTHOOK: Lineage: 
partition_timestamp_1 PARTITION(dt=2001-01-01 02:00:00,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp_1 PARTITION(dt=2001-01-01 02:00:00,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table partition_timestamp_1 partition(dt='2001-01-01 03:00:00', region= '10') + select * from src tablesample (11 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_timestamp_1@dt=2001-01-01 03%3A00%3A00/region=10 +POSTHOOK: query: insert overwrite table partition_timestamp_1 partition(dt='2001-01-01 03:00:00', region= '10') + select * from src tablesample (11 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_timestamp_1@dt=2001-01-01 03%3A00%3A00/region=10 +POSTHOOK: Lineage: partition_timestamp_1 PARTITION(dt=2001-01-01 03:00:00,region=10).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp_1 PARTITION(dt=2001-01-01 03:00:00,region=10).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select distinct dt from partition_timestamp_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 02%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 01%3A00%3A00/region=2020-20-20 +PREHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 03%3A00%3A00/region=10 +#### A masked pattern was here #### +POSTHOOK: query: select distinct dt from partition_timestamp_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 02%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 01%3A00%3A00/region=2020-20-20 +POSTHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 03%3A00%3A00/region=10 +#### A masked pattern was here #### +2000-01-01 01:00:00 +2000-01-01 02:00:00 +2001-01-01 01:00:00 +2001-01-01 02:00:00 +2001-01-01 03:00:00 +PREHOOK: query: select * from partition_timestamp_1 where dt = '2000-01-01 01:00:00' and region = '2' order by key,value +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_timestamp_1 where dt = '2000-01-01 01:00:00' and region = '2' order by key,value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +#### A masked pattern was here #### +PREHOOK: query: -- 10 +select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: -- 10 +select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A 
masked pattern was here #### +10 +PREHOOK: query: -- 10. Also try with string value in predicate +select count(*) from partition_timestamp_1 where dt = '2000-01-01 01:00:00' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: -- 10. Also try with string value in predicate +select count(*) from partition_timestamp_1 where dt = '2000-01-01 01:00:00' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +10 +PREHOOK: query: -- 5 +select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 02:00:00' and region = '2' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 02%3A00%3A00/region=2 +#### A masked pattern was here #### +POSTHOOK: query: -- 5 +select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 02:00:00' and region = '2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 02%3A00%3A00/region=2 +#### A masked pattern was here #### +5 +PREHOOK: query: -- 11 +select count(*) from partition_timestamp_1 where dt = timestamp '2001-01-01 03:00:00' and region = '10' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 03%3A00%3A00/region=10 +#### A masked pattern was here #### +POSTHOOK: query: -- 11 +select count(*) from partition_timestamp_1 where dt = timestamp '2001-01-01 03:00:00' and region = '10' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 03%3A00%3A00/region=10 +#### A masked pattern was here #### +11 +PREHOOK: query: -- 30 +select count(*) from partition_timestamp_1 where region = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: -- 30 +select count(*) from partition_timestamp_1 where region = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +#### A masked pattern was here #### +30 +PREHOOK: query: -- 0 +select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' and region = '3' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +#### A masked pattern was here #### +POSTHOOK: query: -- 0 +select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' and region = '3' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +#### A masked pattern was here #### +0 +PREHOOK: query: -- 0 +select count(*) from partition_timestamp_1 where dt = timestamp '1999-01-01 01:00:00' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +#### A masked pattern was here #### +POSTHOOK: query: -- 0 +select count(*) from partition_timestamp_1 where dt = timestamp '1999-01-01 01:00:00' +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@partition_timestamp_1 +#### A masked pattern was here #### +0 +PREHOOK: query: -- Try other comparison operations + +-- 20 +select count(*) from partition_timestamp_1 where dt > timestamp '2000-01-01 01:00:00' and region = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: -- Try other comparison operations + +-- 20 +select count(*) from partition_timestamp_1 where dt > timestamp '2000-01-01 01:00:00' and region = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +#### A masked pattern was here #### +20 +PREHOOK: query: -- 10 +select count(*) from partition_timestamp_1 where dt < timestamp '2000-01-02 01:00:00' and region = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: -- 10 +select count(*) from partition_timestamp_1 where dt < timestamp '2000-01-02 01:00:00' and region = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +10 +PREHOOK: query: -- 20 +select count(*) from partition_timestamp_1 where dt >= timestamp '2000-01-02 01:00:00' and region = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: -- 20 +select count(*) from partition_timestamp_1 where dt >= timestamp '2000-01-02 01:00:00' and region = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +#### A masked pattern was here #### +20 +PREHOOK: query: -- 10 +select count(*) from partition_timestamp_1 where dt <= timestamp '2000-01-01 01:00:00' and region = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: -- 10 +select count(*) from partition_timestamp_1 where dt <= timestamp '2000-01-01 01:00:00' and region = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +10 +PREHOOK: query: -- 20 +select count(*) from partition_timestamp_1 where dt <> timestamp '2000-01-01 01:00:00' and region = '1' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: -- 20 +select count(*) from partition_timestamp_1 where dt <> timestamp '2000-01-01 01:00:00' and region = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 02%3A00%3A00/region=1 +#### A masked pattern was here #### +20 +PREHOOK: query: -- 10 +select count(*) from partition_timestamp_1 where dt between timestamp '1999-12-30 12:00:00' and timestamp '2000-01-03 12:00:00' and region = '1' +PREHOOK: type: QUERY +PREHOOK: 
Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: -- 10 +select count(*) from partition_timestamp_1 where dt between timestamp '1999-12-30 12:00:00' and timestamp '2000-01-03 12:00:00' and region = '1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +10 +PREHOOK: query: -- Try a string key with timestamp-like strings + +-- 5 +select count(*) from partition_timestamp_1 where region = '2020-20-20' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 01%3A00%3A00/region=2020-20-20 +#### A masked pattern was here #### +POSTHOOK: query: -- Try a string key with timestamp-like strings + +-- 5 +select count(*) from partition_timestamp_1 where region = '2020-20-20' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 01%3A00%3A00/region=2020-20-20 +#### A masked pattern was here #### +5 +PREHOOK: query: -- 5 +select count(*) from partition_timestamp_1 where region > '2010-01-01' +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 01%3A00%3A00/region=2020-20-20 +#### A masked pattern was here #### +POSTHOOK: query: -- 5 +select count(*) from partition_timestamp_1 where region > '2010-01-01' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Input: default@partition_timestamp_1@dt=2001-01-01 01%3A00%3A00/region=2020-20-20 +#### A masked pattern was here #### +5 +PREHOOK: query: drop table partition_timestamp_1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@partition_timestamp_1 +PREHOOK: Output: default@partition_timestamp_1 +POSTHOOK: query: drop table partition_timestamp_1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@partition_timestamp_1 +POSTHOOK: Output: default@partition_timestamp_1 diff --git a/ql/src/test/results/clientpositive/partition_timestamp2.q.out b/ql/src/test/results/clientpositive/partition_timestamp2.q.out new file mode 100644 index 0000000..365df69 --- /dev/null +++ b/ql/src/test/results/clientpositive/partition_timestamp2.q.out @@ -0,0 +1,395 @@ +PREHOOK: query: drop table partition_timestamp2_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table partition_timestamp2_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table partition_timestamp2_1 (key string, value string) partitioned by (dt timestamp, region int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@partition_timestamp2_1 +POSTHOOK: query: create table partition_timestamp2_1 (key string, value string) partitioned by (dt timestamp, region int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partition_timestamp2_1 +PREHOOK: query: -- test timestamp literal syntax +from (select * from src tablesample (1 rows)) x +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) select * +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 01:00:00', region=1) select * +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '1999-01-01 00:00:00', region=2) select * +insert overwrite table partition_timestamp2_1 
partition(dt=timestamp '1999-01-01 01:00:00', region=2) select * +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Output: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +PREHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +POSTHOOK: query: -- test timestamp literal syntax +from (select * from src tablesample (1 rows)) x +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) select * +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 01:00:00', region=1) select * +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '1999-01-01 00:00:00', region=2) select * +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '1999-01-01 01:00:00', region=2) select * +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Output: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +POSTHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=1999-01-01 00:00:00,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=1999-01-01 00:00:00,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=1999-01-01 01:00:00,region=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=1999-01-01 01:00:00,region=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=2000-01-01 00:00:00,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=2000-01-01 00:00:00,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=2000-01-01 01:00:00,region=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=2000-01-01 01:00:00,region=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select distinct dt from partition_timestamp2_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: select distinct dt from partition_timestamp2_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +POSTHOOK: Input: 
default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +1999-01-01 00:00:00 +1999-01-01 01:00:00 +2000-01-01 00:00:00 +2000-01-01 01:00:00 +PREHOOK: query: select * from partition_timestamp2_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_timestamp2_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +238 val_238 1999-01-01 00:00:00 2 +238 val_238 1999-01-01 01:00:00 2 +238 val_238 2000-01-01 00:00:00 1 +238 val_238 2000-01-01 01:00:00 1 +PREHOOK: query: -- insert overwrite +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) + select 'changed_key', 'changed_value' from src tablesample (2 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: query: -- insert overwrite +insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) + select 'changed_key', 'changed_value' from src tablesample (2 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=2000-01-01 00:00:00,region=1).key SIMPLE [] +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=2000-01-01 00:00:00,region=1).value SIMPLE [] +PREHOOK: query: select * from partition_timestamp2_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_timestamp2_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +238 val_238 1999-01-01 00:00:00 2 +238 val_238 1999-01-01 01:00:00 2 +changed_key changed_value 2000-01-01 00:00:00 1 +changed_key changed_value 2000-01-01 00:00:00 1 +238 val_238 2000-01-01 01:00:00 1 +PREHOOK: query: -- truncate +truncate 
table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: query: -- truncate +truncate table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: query: select distinct dt from partition_timestamp2_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: select distinct dt from partition_timestamp2_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +1999-01-01 00:00:00 +1999-01-01 01:00:00 +2000-01-01 00:00:00 +2000-01-01 01:00:00 +PREHOOK: query: select * from partition_timestamp2_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_timestamp2_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +238 val_238 1999-01-01 00:00:00 2 +238 val_238 1999-01-01 01:00:00 2 +238 val_238 2000-01-01 01:00:00 1 +PREHOOK: query: -- alter table add partition +alter table partition_timestamp2_1 add partition (dt=timestamp '1980-01-02 00:00:00', region=3) +PREHOOK: type: ALTERTABLE_ADDPARTS +PREHOOK: Output: default@partition_timestamp2_1 +POSTHOOK: query: -- alter table add partition +alter table partition_timestamp2_1 add partition (dt=timestamp '1980-01-02 00:00:00', region=3) +POSTHOOK: type: ALTERTABLE_ADDPARTS +POSTHOOK: Output: default@partition_timestamp2_1 +POSTHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +PREHOOK: query: select distinct dt from partition_timestamp2_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Input: 
default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: select distinct dt from partition_timestamp2_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +1980-01-02 00:00:00 +1999-01-01 00:00:00 +1999-01-01 01:00:00 +2000-01-01 00:00:00 +2000-01-01 01:00:00 +PREHOOK: query: select * from partition_timestamp2_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_timestamp2_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +238 val_238 1999-01-01 00:00:00 2 +238 val_238 1999-01-01 01:00:00 2 +238 val_238 2000-01-01 01:00:00 1 +PREHOOK: query: -- alter table drop +alter table partition_timestamp2_1 drop partition (dt=timestamp '1999-01-01 01:00:00', region=2) +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Output: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +POSTHOOK: query: -- alter table drop +alter table partition_timestamp2_1 drop partition (dt=timestamp '1999-01-01 01:00:00', region=2) +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Output: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00/region=2 +PREHOOK: query: select distinct dt from partition_timestamp2_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: select distinct dt from partition_timestamp2_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: 
default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +1980-01-02 00:00:00 +1999-01-01 00:00:00 +2000-01-01 00:00:00 +2000-01-01 01:00:00 +PREHOOK: query: select * from partition_timestamp2_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_timestamp2_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +238 val_238 1999-01-01 00:00:00 2 +238 val_238 2000-01-01 01:00:00 1 +PREHOOK: query: -- alter table set serde +alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) + set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +PREHOOK: type: ALTERPARTITION_SERIALIZER +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: query: -- alter table set serde +alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) + set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +POSTHOOK: type: ALTERPARTITION_SERIALIZER +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +PREHOOK: query: -- alter table set fileformat +alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) + set fileformat rcfile +PREHOOK: type: ALTERPARTITION_FILEFORMAT +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: query: -- alter table set fileformat +alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) + set fileformat rcfile +POSTHOOK: type: ALTERPARTITION_FILEFORMAT +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +PREHOOK: query: describe extended partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: query: describe extended partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@partition_timestamp2_1 +key string +value string +dt timestamp +region int + 
+# Partition Information +# col_name data_type comment + +dt timestamp +region int + +#### A masked pattern was here #### +PREHOOK: query: insert overwrite table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) + select * from src tablesample (2 rows) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: query: insert overwrite table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) + select * from src tablesample (2 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=1980-01-02 00:00:00,region=3).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: partition_timestamp2_1 PARTITION(dt=1980-01-02 00:00:00,region=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: select * from partition_timestamp2_1 order by key,value,dt,region +PREHOOK: type: QUERY +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +PREHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +PREHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +POSTHOOK: query: select * from partition_timestamp2_1 order by key,value,dt,region +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00/region=2 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00/region=1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00/region=1 +#### A masked pattern was here #### +238 val_238 1980-01-02 00:00:00 3 +238 val_238 1999-01-01 00:00:00 2 +238 val_238 2000-01-01 01:00:00 1 +86 val_86 1980-01-02 00:00:00 3 +PREHOOK: query: -- alter table set location +alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +#### A masked pattern was here #### +PREHOOK: type: ALTERPARTITION_LOCATION +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +#### A masked pattern was here #### +POSTHOOK: query: -- alter table set location +alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +#### A masked pattern was here #### +POSTHOOK: type: ALTERPARTITION_LOCATION +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +#### A masked pattern was here #### +PREHOOK: query: describe extended partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: query: describe extended partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@partition_timestamp2_1 +key string +value string +dt timestamp +region int + +# Partition 
Information +# col_name data_type comment + +dt timestamp +region int + +#### A masked pattern was here #### +PREHOOK: query: -- alter table touch +alter table partition_timestamp2_1 touch partition(dt=timestamp '1980-01-02 00:00:00', region=3) +PREHOOK: type: ALTERTABLE_TOUCH +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: query: -- alter table touch +alter table partition_timestamp2_1 touch partition(dt=timestamp '1980-01-02 00:00:00', region=3) +POSTHOOK: type: ALTERTABLE_TOUCH +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +POSTHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00/region=3 +PREHOOK: query: drop table partition_timestamp2_1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@partition_timestamp2_1 +PREHOOK: Output: default@partition_timestamp2_1 +POSTHOOK: query: drop table partition_timestamp2_1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@partition_timestamp2_1 +POSTHOOK: Output: default@partition_timestamp2_1 diff --git a/ql/src/test/results/clientpositive/partition_type_in_plan.q.out b/ql/src/test/results/clientpositive/partition_type_in_plan.q.out new file mode 100644 index 0000000..58b8e0c --- /dev/null +++ b/ql/src/test/results/clientpositive/partition_type_in_plan.q.out @@ -0,0 +1,57 @@ +PREHOOK: query: -- Test partition column type is considered as the type given in table def +-- and not as 'string' +CREATE TABLE datePartTbl(col1 string) PARTITIONED BY (date_prt date) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@datePartTbl +POSTHOOK: query: -- Test partition column type is considered as the type given in table def +-- and not as 'string' +CREATE TABLE datePartTbl(col1 string) PARTITIONED BY (date_prt date) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@datePartTbl +PREHOOK: query: -- Add test partitions and some sample data +INSERT OVERWRITE TABLE datePartTbl PARTITION(date_prt='2014-08-09') + SELECT 'col1-2014-08-09' FROM src LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dateparttbl@date_prt=2014-08-09 +POSTHOOK: query: -- Add test partitions and some sample data +INSERT OVERWRITE TABLE datePartTbl PARTITION(date_prt='2014-08-09') + SELECT 'col1-2014-08-09' FROM src LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dateparttbl@date_prt=2014-08-09 +POSTHOOK: Lineage: dateparttbl PARTITION(date_prt=2014-08-09).col1 SIMPLE [] +PREHOOK: query: INSERT OVERWRITE TABLE datePartTbl PARTITION(date_prt='2014-08-10') + SELECT 'col1-2014-08-10' FROM src LIMIT 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@dateparttbl@date_prt=2014-08-10 +POSTHOOK: query: INSERT OVERWRITE TABLE datePartTbl PARTITION(date_prt='2014-08-10') + SELECT 'col1-2014-08-10' FROM src LIMIT 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@dateparttbl@date_prt=2014-08-10 +POSTHOOK: Lineage: dateparttbl PARTITION(date_prt=2014-08-10).col1 SIMPLE [] +PREHOOK: query: -- Query where 'date_prt' value is restricted to given values in IN operator. 
+SELECT * FROM datePartTbl WHERE date_prt IN (CAST('2014-08-09' AS DATE), CAST('2014-08-08' AS DATE)) +PREHOOK: type: QUERY +PREHOOK: Input: default@dateparttbl +PREHOOK: Input: default@dateparttbl@date_prt=2014-08-09 +#### A masked pattern was here #### +POSTHOOK: query: -- Query where 'date_prt' value is restricted to given values in IN operator. +SELECT * FROM datePartTbl WHERE date_prt IN (CAST('2014-08-09' AS DATE), CAST('2014-08-08' AS DATE)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dateparttbl +POSTHOOK: Input: default@dateparttbl@date_prt=2014-08-09 +#### A masked pattern was here #### +col1-2014-08-09 2014-08-09 +PREHOOK: query: DROP TABLE datePartTbl +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@dateparttbl +PREHOOK: Output: default@dateparttbl +POSTHOOK: query: DROP TABLE datePartTbl +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@dateparttbl +POSTHOOK: Output: default@dateparttbl diff --git a/ql/src/test/results/clientpositive/serde_regex.q.out b/ql/src/test/results/clientpositive/serde_regex.q.out index 65eef4f..19187ba 100644 --- a/ql/src/test/results/clientpositive/serde_regex.q.out +++ b/ql/src/test/results/clientpositive/serde_regex.q.out @@ -201,7 +201,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@serde_regex1 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -210,7 +210,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.000000000000000000 0 +0 0 0 0 0 0 0.01 0 @@ -221,8 +221,8 @@ NULL 0 0.33 0 0.333 0 1 1 -1.0 1 -1.000000000000000000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 @@ -230,14 +230,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: DROP TABLE serde_regex1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@serde_regex1 diff --git a/ql/src/test/results/clientpositive/show_roles.q.out b/ql/src/test/results/clientpositive/show_roles.q.out index c3c8c6d..f90bba0 100644 --- a/ql/src/test/results/clientpositive/show_roles.q.out +++ b/ql/src/test/results/clientpositive/show_roles.q.out @@ -14,4 +14,3 @@ admin public role1 role2 - diff --git a/ql/src/test/results/clientpositive/tez/alter_merge_orc.q.out b/ql/src/test/results/clientpositive/tez/alter_merge_orc.q.out index 78f40f4..22914a8 100644 --- a/ql/src/test/results/clientpositive/tez/alter_merge_orc.q.out +++ b/ql/src/test/results/clientpositive/tez/alter_merge_orc.q.out @@ -48,9 +48,9 @@ columns:struct columns { i32 key, string value} partitioned:false partitionColumns: totalNumberFiles:3 -totalFileSize:7488 -maxFileSize:2496 -minFileSize:2496 +totalFileSize:7494 +maxFileSize:2498 +minFileSize:2498 #### A masked pattern was here #### PREHOOK: query: select count(1) from src_orc_merge_test @@ -91,9 +91,9 @@ columns:struct columns { i32 key, string value} partitioned:false partitionColumns: totalNumberFiles:1 -totalFileSize:7167 -maxFileSize:7167 -minFileSize:7167 +totalFileSize:7169 +maxFileSize:7169 +minFileSize:7169 #### A masked pattern was here #### PREHOOK: query: select count(1) from src_orc_merge_test @@ -171,9 +171,9 @@ columns:struct columns { i32 key, string value} partitioned:true partitionColumns:struct partition_columns { string ds} totalNumberFiles:3 -totalFileSize:7488 -maxFileSize:2496 -minFileSize:2496 +totalFileSize:7494 +maxFileSize:2498 +minFileSize:2498 #### A masked pattern was here #### PREHOOK: query: select count(1) from src_orc_merge_test_part @@ 
-218,9 +218,9 @@ columns:struct columns { i32 key, string value} partitioned:true partitionColumns:struct partition_columns { string ds} totalNumberFiles:1 -totalFileSize:7167 -maxFileSize:7167 -minFileSize:7167 +totalFileSize:7169 +maxFileSize:7169 +minFileSize:7169 #### A masked pattern was here #### PREHOOK: query: select count(1) from src_orc_merge_test_part diff --git a/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out index f8486ad..cdcc18a 100644 --- a/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out +++ b/ql/src/test/results/clientpositive/tez/alter_merge_stats_orc.q.out @@ -48,9 +48,9 @@ columns:struct columns { i32 key, string value} partitioned:false partitionColumns: totalNumberFiles:3 -totalFileSize:7488 -maxFileSize:2496 -minFileSize:2496 +totalFileSize:7494 +maxFileSize:2498 +minFileSize:2498 #### A masked pattern was here #### PREHOOK: query: desc extended src_orc_merge_test_stat @@ -94,7 +94,7 @@ Table Parameters: numFiles 3 numRows 1500 rawDataSize 141000 - totalSize 7488 + totalSize 7494 #### A masked pattern was here #### # Storage Information @@ -146,7 +146,7 @@ Table Parameters: numFiles 1 numRows 1500 rawDataSize 141000 - totalSize 7167 + totalSize 7169 #### A masked pattern was here #### # Storage Information @@ -216,9 +216,9 @@ columns:struct columns { i32 key, string value} partitioned:true partitionColumns:struct partition_columns { string ds} totalNumberFiles:3 -totalFileSize:7488 -maxFileSize:2496 -minFileSize:2496 +totalFileSize:7494 +maxFileSize:2498 +minFileSize:2498 #### A masked pattern was here #### PREHOOK: query: desc formatted src_orc_merge_test_part_stat partition (ds='2011') @@ -249,7 +249,7 @@ Partition Parameters: numFiles 3 numRows 1500 rawDataSize 141000 - totalSize 7488 + totalSize 7494 #### A masked pattern was here #### # Storage Information @@ -300,7 +300,7 @@ Partition Parameters: numFiles 3 numRows 1500 rawDataSize 141000 - totalSize 7488 + totalSize 7494 #### A masked pattern was here #### # Storage Information @@ -359,7 +359,7 @@ Partition Parameters: numFiles 1 numRows 1500 rawDataSize 141000 - totalSize 7167 + totalSize 7169 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out index cc13e37..afc261a 100644 --- a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out @@ -44,19 +44,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: string), value (type: string) Map 3 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: string), 
value (type: string) Reducer 2 Reduce Operator Tree: @@ -67,14 +67,14 @@ STAGE PLANS: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} outputColumnNames: _col0, _col1, _col5, _col6 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -107,38 +107,38 @@ STAGE PLANS: Map Operator Tree: TableScan alias: d1 - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: string) Map 4 Map Operator Tree: TableScan alias: d2 - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: string) Map 5 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: string), value (type: string) Reducer 2 Reduce Operator Tree: @@ -149,10 +149,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} outputColumnNames: _col0, _col1, _col5, _col6 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column 
stats: NONE value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) Reducer 3 Reduce Operator Tree: @@ -163,14 +163,14 @@ STAGE PLANS: 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {VALUE._col6} 1 {VALUE._col0} {VALUE._col1} outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -210,36 +210,36 @@ STAGE PLANS: Map Operator Tree: TableScan alias: d1 - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Map 5 Map Operator Tree: TableScan alias: d2 - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Map 6 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: string), value (type: string) Reducer 2 Reduce Operator Tree: @@ -250,35 +250,35 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num 
rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) Reducer 4 Reduce Operator Tree: @@ -289,14 +289,14 @@ STAGE PLANS: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} outputColumnNames: _col0, _col1, _col5 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -331,27 +331,27 @@ STAGE PLANS: Map Operator Tree: TableScan alias: d1 - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: string) Map 5 Map Operator Tree: TableScan alias: d2 - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE Map 6 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: 
Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: string), value (type: string) Reducer 2 Reduce Operator Tree: @@ -362,35 +362,35 @@ STAGE PLANS: 0 {VALUE._col0} 1 outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 51 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 51 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 51 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) Reducer 4 Reduce Operator Tree: @@ -401,14 +401,14 @@ STAGE PLANS: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} outputColumnNames: _col0, _col1, _col5 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -447,47 +447,47 @@ STAGE PLANS: Map Operator Tree: TableScan alias: d1 - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: 
string) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Map 5 Map Operator Tree: TableScan alias: d2 - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Map 6 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: key (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Merge Join Operator @@ -497,35 +497,35 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Reducer 3 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic 
stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) Reducer 4 Reduce Operator Tree: @@ -536,14 +536,14 @@ STAGE PLANS: 0 {VALUE._col0} 1 {VALUE._col0} outputColumnNames: _col0, _col1 - Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -554,14 +554,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) Stage: Stage-0 diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out index 69c3001..0e407c5 100644 --- a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out @@ -44,16 +44,16 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE value expressions: key (type: string), value (type: string) Map 2 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -66,14 +66,14 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col5, _col6 input vertices: 1 Map 1 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output 
Operator compressed: false - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -106,10 +106,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: d1 - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -122,30 +122,30 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col5, _col6 input vertices: 1 Map 2 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) Map 2 Map Operator Tree: TableScan alias: d2 - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE value expressions: value (type: string) Map 3 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -158,14 +158,14 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 input vertices: 0 Map 1 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -205,10 +205,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: d1 - Statistics: Num rows: 1 Data size: 114 Basic 
stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -221,39 +221,39 @@ STAGE PLANS: outputColumnNames: _col0 input vertices: 1 Map 3 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Map 3 Map Operator Tree: TableScan alias: d2 - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -266,14 +266,14 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col5 input vertices: 1 Reducer 2 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -284,14 +284,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE 
Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) Stage: Stage-0 @@ -323,7 +323,7 @@ STAGE PLANS: Map Operator Tree: TableScan alias: d1 - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -336,34 +336,34 @@ STAGE PLANS: outputColumnNames: _col0 input vertices: 1 Map 3 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 11 Data size: 114 Basic stats: COMPLETE Column stats: NONE Map 3 Map Operator Tree: TableScan alias: d2 - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 114 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE Map 4 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -376,14 +376,14 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col5 input vertices: 1 Reducer 2 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -394,14 +394,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 0 
Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 51 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 51 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 5 Data size: 51 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) Stage: Stage-0 @@ -436,10 +436,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: d1 - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -452,67 +452,67 @@ STAGE PLANS: outputColumnNames: _col0 input vertices: 1 Map 3 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: _col0 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1 Data size: 125 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Map 3 Map Operator Tree: TableScan alias: d2 - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: key is not null (type: boolean) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: key (type: string) sort order: + Map-reduce partition columns: key (type: string) - Statistics: Num rows: 1 Data size: 114 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 5 Data size: 52 Basic stats: COMPLETE Column stats: NONE Map 4 Map Operator Tree: TableScan alias: a - Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: string) outputColumnNames: key - Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Group By Operator keys: key (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: 
NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator sort order: - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Statistics: Num rows: 2 Data size: 22 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) Reducer 5 Reduce Operator Tree: @@ -520,11 +520,11 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 @@ -537,14 +537,14 @@ STAGE PLANS: outputColumnNames: _col0, _col1 input vertices: 1 Reducer 2 - Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/tez/ctas.q.out b/ql/src/test/results/clientpositive/tez/ctas.q.out index 843d6fe..b90716e 100644 --- a/ql/src/test/results/clientpositive/tez/ctas.q.out +++ b/ql/src/test/results/clientpositive/tez/ctas.q.out @@ -151,8 +151,8 @@ Table Type: MANAGED_TABLE Table Parameters: COLUMN_STATS_ACCURATE true numFiles 1 - numRows 0 - rawDataSize 0 + numRows 10 + rawDataSize 96 totalSize 106 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out index 71e3eb5..6f61441 100644 --- a/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_opt_vectorization.q.out @@ -932,7 +932,7 @@ Partition Parameters: numFiles 2 numRows 32 rawDataSize 640 - totalSize 1348 + totalSize 1352 #### A masked pattern was here #### # Storage Information @@ -976,7 +976,7 @@ 
Partition Parameters: numFiles 2 numRows 6 rawDataSize 120 - totalSize 1050 + totalSize 1054 #### A masked pattern was here #### # Storage Information @@ -1020,7 +1020,7 @@ Partition Parameters: numFiles 2 numRows 14 rawDataSize 280 - totalSize 1166 + totalSize 1170 #### A masked pattern was here #### # Storage Information @@ -1064,7 +1064,7 @@ Partition Parameters: numFiles 2 numRows 6 rawDataSize 120 - totalSize 1050 + totalSize 1054 #### A masked pattern was here #### # Storage Information @@ -1107,7 +1107,7 @@ Partition Parameters: numFiles 8 numRows 32 rawDataSize 640 - totalSize 4340 + totalSize 4356 #### A masked pattern was here #### # Storage Information @@ -1150,7 +1150,7 @@ Partition Parameters: numFiles 8 numRows 6 rawDataSize 120 - totalSize 2094 + totalSize 2110 #### A masked pattern was here #### # Storage Information @@ -1193,7 +1193,7 @@ Partition Parameters: numFiles 8 numRows 32 rawDataSize 640 - totalSize 4326 + totalSize 4342 #### A masked pattern was here #### # Storage Information @@ -1236,7 +1236,7 @@ Partition Parameters: numFiles 8 numRows 6 rawDataSize 120 - totalSize 2094 + totalSize 2110 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out index 52bef6f..42e2cde 100644 --- a/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out +++ b/ql/src/test/results/clientpositive/tez/dynpart_sort_optimization2.q.out @@ -1234,7 +1234,7 @@ Partition Parameters: numFiles 1 numRows 11 rawDataSize 88 - totalSize 417 + totalSize 419 #### A masked pattern was here #### # Storage Information @@ -1296,7 +1296,7 @@ Partition Parameters: numFiles 1 numRows 13 rawDataSize 104 - totalSize 440 + totalSize 442 #### A masked pattern was here #### # Storage Information @@ -1466,7 +1466,7 @@ Partition Parameters: numFiles 1 numRows 11 rawDataSize 88 - totalSize 417 + totalSize 419 #### A masked pattern was here #### # Storage Information @@ -1528,7 +1528,7 @@ Partition Parameters: numFiles 1 numRows 13 rawDataSize 104 - totalSize 440 + totalSize 442 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/join_nullsafe.q.out b/ql/src/test/results/clientpositive/tez/join_nullsafe.q.out new file mode 100644 index 0000000..b71028a --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/join_nullsafe.q.out @@ -0,0 +1,1641 @@ +PREHOOK: query: -- SORT_QUERY_RESULTS + +CREATE TABLE myinput1(key int, value int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@myinput1 +POSTHOOK: query: -- SORT_QUERY_RESULTS + +CREATE TABLE myinput1(key int, value int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@myinput1 +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@myinput1 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE myinput1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@myinput1 +PREHOOK: query: -- merging +explain select * from myinput1 a join myinput1 b on a.key<=>b.value +PREHOOK: type: QUERY +POSTHOOK: query: -- merging +explain select * from myinput1 a join myinput1 b on a.key<=>b.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + 
+STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: int) + sort order: + + Map-reduce partition columns: value (type: int) + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + value expressions: key (type: int) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: int) + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {KEY.reducesinkkey0} {VALUE._col0} + 1 {VALUE._col0} {KEY.reducesinkkey0} + nullSafes: [true] + outputColumnNames: _col0, _col1, _col5, _col6 + Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +10 NULL NULL 10 +100 100 100 100 +NULL 10 10 NULL +NULL 10 48 NULL +NULL 10 NULL NULL +NULL 35 10 NULL +NULL 35 48 NULL +NULL 35 NULL NULL +NULL NULL 10 NULL +NULL NULL 48 NULL +NULL NULL NULL NULL +PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: int) + sort order: + + Map-reduce partition columns: value (type: int) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + value expressions: key (type: int) + Map 3 + Map Operator Tree: + TableScan + alias: c + 
Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: int) + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: int) + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 {KEY.reducesinkkey0} {VALUE._col0} + 1 {VALUE._col0} {KEY.reducesinkkey0} + 2 {KEY.reducesinkkey0} {VALUE._col0} + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 + Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key=c.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +10 NULL NULL 10 10 NULL +100 100 100 100 100 100 +PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: int) + sort order: + + Map-reduce partition columns: value (type: int) + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + value expressions: key (type: int) + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 3 Data size: 26 
Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: int) + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: int) + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 {KEY.reducesinkkey0} {VALUE._col0} + 1 {VALUE._col0} {KEY.reducesinkkey0} + 2 {KEY.reducesinkkey0} {VALUE._col0} + nullSafes: [true] + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 + Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value join myinput1 c on a.key<=>c.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +10 NULL NULL 10 10 NULL +100 100 100 100 100 100 +NULL 10 10 NULL NULL 10 +NULL 10 10 NULL NULL 35 +NULL 10 10 NULL NULL NULL +NULL 10 48 NULL NULL 10 +NULL 10 48 NULL NULL 35 +NULL 10 48 NULL NULL NULL +NULL 10 NULL NULL NULL 10 +NULL 10 NULL NULL NULL 35 +NULL 10 NULL NULL NULL NULL +NULL 35 10 NULL NULL 10 +NULL 35 10 NULL NULL 35 +NULL 35 10 NULL NULL NULL +NULL 35 48 NULL NULL 10 +NULL 35 48 NULL NULL 35 +NULL 35 48 NULL NULL NULL +NULL 35 NULL NULL NULL 10 +NULL 35 NULL NULL NULL 35 +NULL 35 NULL NULL NULL NULL +NULL NULL 10 NULL NULL 10 +NULL NULL 10 NULL NULL 35 +NULL NULL 10 NULL NULL NULL +NULL NULL 48 NULL NULL 10 +NULL NULL 48 NULL NULL 35 +NULL NULL 48 NULL NULL NULL +NULL NULL NULL NULL NULL 10 +NULL NULL NULL NULL NULL 35 +NULL NULL NULL NULL NULL NULL +PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) +#### A masked 
pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: int), key (type: int) + sort order: ++ + Map-reduce partition columns: value (type: int), key (type: int) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int), value (type: int) + sort order: ++ + Map-reduce partition columns: key (type: int), value (type: int) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is not null (type: boolean) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int), value (type: int) + sort order: ++ + Map-reduce partition columns: key (type: int), value (type: int) + Statistics: Num rows: 2 Data size: 17 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} + 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} + 2 {KEY.reducesinkkey0} {KEY.reducesinkkey1} + nullSafes: [true, false] + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 + Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value=b.key join myinput1 c on a.key<=>c.key AND a.value=c.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +100 100 100 100 100 100 +NULL 10 10 NULL NULL 10 +PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND 
a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: value (type: int), key (type: int) + sort order: ++ + Map-reduce partition columns: value (type: int), key (type: int) + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Map 3 + Map Operator Tree: + TableScan + alias: c + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int), value (type: int) + sort order: ++ + Map-reduce partition columns: key (type: int), value (type: int) + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int), value (type: int) + sort order: ++ + Map-reduce partition columns: key (type: int), value (type: int) + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + condition expressions: + 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} + 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} + 2 {KEY.reducesinkkey0} {KEY.reducesinkkey1} + nullSafes: [true, true] + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 + Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.value<=>b.key join myinput1 c on a.key<=>c.key AND a.value<=>c.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +10 NULL NULL 10 10 NULL +100 100 100 100 100 100 +NULL 10 10 NULL NULL 10 +NULL NULL NULL NULL NULL NULL +PREHOOK: query: -- outer joins +SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: -- outer joins +SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value +POSTHOOK: type: QUERY 
+POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +10 NULL NULL 10 +100 100 100 100 +48 NULL NULL NULL +NULL 10 10 NULL +NULL 10 48 NULL +NULL 10 NULL NULL +NULL 35 10 NULL +NULL 35 48 NULL +NULL 35 NULL NULL +NULL NULL 10 NULL +NULL NULL 48 NULL +NULL NULL NULL NULL +PREHOOK: query: SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM myinput1 a RIGHT OUTER JOIN myinput1 b ON a.key<=>b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +10 NULL NULL 10 +100 100 100 100 +NULL 10 10 NULL +NULL 10 48 NULL +NULL 10 NULL NULL +NULL 35 10 NULL +NULL 35 48 NULL +NULL 35 NULL NULL +NULL NULL 10 NULL +NULL NULL 48 NULL +NULL NULL NULL 35 +NULL NULL NULL NULL +PREHOOK: query: SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM myinput1 a FULL OUTER JOIN myinput1 b ON a.key<=>b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +10 NULL NULL 10 +100 100 100 100 +48 NULL NULL NULL +NULL 10 10 NULL +NULL 10 48 NULL +NULL 10 NULL NULL +NULL 35 10 NULL +NULL 35 48 NULL +NULL 35 NULL NULL +NULL NULL 10 NULL +NULL NULL 48 NULL +NULL NULL NULL 35 +NULL NULL NULL NULL +PREHOOK: query: -- map joins +SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: -- map joins +SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +10 NULL NULL 10 +100 100 100 100 +NULL 10 10 NULL +NULL 10 48 NULL +NULL 10 NULL NULL +NULL 35 10 NULL +NULL 35 48 NULL +NULL 35 NULL NULL +NULL NULL 10 NULL +NULL NULL 48 NULL +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +10 NULL NULL 10 +100 100 100 100 +NULL 10 10 NULL +NULL 10 48 NULL +NULL 10 NULL NULL +NULL 35 10 NULL +NULL 35 48 NULL +NULL 35 NULL NULL +NULL NULL 10 NULL +NULL NULL 48 NULL +NULL NULL NULL NULL +PREHOOK: query: CREATE TABLE smb_input(key int, value int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@smb_input +POSTHOOK: query: CREATE TABLE smb_input(key int, value int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@smb_input +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@smb_input +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in4.txt' into table smb_input +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@smb_input +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@smb_input +POSTHOOK: query: LOAD 
DATA LOCAL INPATH '../../data/files/in5.txt' into table smb_input +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@smb_input +PREHOOK: query: -- smbs +CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@smb_input1 +POSTHOOK: query: -- smbs +CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@smb_input1 +PREHOOK: query: CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@smb_input2 +POSTHOOK: query: CREATE TABLE smb_input2(key int, value int) CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@smb_input2 +PREHOOK: query: from smb_input +insert overwrite table smb_input1 select * +insert overwrite table smb_input2 select * +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input +PREHOOK: Output: default@smb_input1 +PREHOOK: Output: default@smb_input2 +POSTHOOK: query: from smb_input +insert overwrite table smb_input1 select * +insert overwrite table smb_input2 select * +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input +POSTHOOK: Output: default@smb_input1 +POSTHOOK: Output: default@smb_input2 +POSTHOOK: Lineage: smb_input1.key SIMPLE [(smb_input)smb_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_input1.value SIMPLE [(smb_input)smb_input.FieldSchema(name:value, type:int, comment:null), ] +POSTHOOK: Lineage: smb_input2.key SIMPLE [(smb_input)smb_input.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: smb_input2.value SIMPLE [(smb_input)smb_input.FieldSchema(name:value, type:int, comment:null), ] +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input1 +#### A masked pattern was here #### +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 1000 +10 100 10 1000 +10 1000 10 100 +10 1000 10 100 +10 1000 10 1000 +100 100 100 100 +12 100 12 100 +12 100 12 NULL +12 NULL 12 100 +12 NULL 12 NULL +15 10015 15 10015 +20 10020 20 10020 +25 10025 25 10025 +30 10030 30 10030 +35 10035 35 10035 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +5 10005 5 10005 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +NULL 10050 NULL 10050 +NULL 10050 NULL 35 +NULL 10050 NULL NULL +NULL 35 NULL 10050 +NULL 35 NULL 35 +NULL 35 NULL NULL +NULL NULL NULL 10050 +NULL NULL NULL 35 +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value +PREHOOK: type: QUERY +PREHOOK: Input: 
default@smb_input1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key AND a.value <=> b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input1 +#### A masked pattern was here #### +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 1000 10 1000 +100 100 100 100 +12 100 12 100 +12 NULL 12 NULL +15 10015 15 10015 +20 10020 20 10020 +25 10025 25 10025 +30 10030 30 10030 +35 10035 35 10035 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +5 10005 5 10005 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +NULL 10050 NULL 10050 +NULL 35 NULL 35 +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input1 b ON a.key <=> b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input1 +#### A masked pattern was here #### +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 1000 +10 100 10 1000 +10 1000 10 100 +10 1000 10 100 +10 1000 10 1000 +100 100 100 100 +12 100 12 100 +12 100 12 NULL +12 NULL 12 100 +12 NULL 12 NULL +15 10015 15 10015 +20 10020 20 10020 +25 10025 25 10025 +30 10030 30 10030 +35 10035 35 10035 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +5 10005 5 10005 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +NULL 10050 NULL 10050 +NULL 10050 NULL 35 +NULL 10050 NULL NULL +NULL 35 NULL 10050 +NULL 35 NULL 35 +NULL 35 NULL NULL +NULL NULL NULL 10050 +NULL NULL NULL 35 +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input1 b ON a.key <=> b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input1 +#### A masked pattern was here #### +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 1000 +10 100 10 1000 +10 1000 10 100 +10 1000 10 100 +10 1000 10 1000 +100 100 100 100 +12 100 12 100 +12 100 12 NULL +12 NULL 12 100 +12 NULL 12 NULL +15 10015 15 10015 +20 10020 20 10020 +25 10025 25 10025 +30 10030 30 10030 +35 10035 35 10035 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +5 10005 5 10005 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 
70 10040 +70 10040 70 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +NULL 10050 NULL 10050 +NULL 10050 NULL 35 +NULL 10050 NULL NULL +NULL 35 NULL 10050 +NULL 35 NULL 35 +NULL 35 NULL NULL +NULL NULL NULL 10050 +NULL NULL NULL 35 +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input1 b ON a.key <=> b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input1 +#### A masked pattern was here #### +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 1000 +10 100 10 1000 +10 1000 10 100 +10 1000 10 100 +10 1000 10 1000 +100 100 100 100 +12 100 12 100 +12 100 12 NULL +12 NULL 12 100 +12 NULL 12 NULL +15 10015 15 10015 +20 10020 20 10020 +25 10025 25 10025 +30 10030 30 10030 +35 10035 35 10035 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +5 10005 5 10005 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +NULL 10050 NULL 10050 +NULL 10050 NULL 35 +NULL 10050 NULL NULL +NULL 35 NULL 10050 +NULL 35 NULL 35 +NULL 35 NULL NULL +NULL NULL NULL 10050 +NULL NULL NULL 35 +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input1 +PREHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input1 +POSTHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +100 100 10 100 +100 100 10 100 +100 100 100 100 +100 100 12 100 +35 10035 NULL 35 +NULL 10050 12 NULL +NULL 10050 NULL NULL +NULL 35 12 NULL +NULL 35 NULL NULL +NULL NULL 12 NULL +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input1 +PREHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a JOIN smb_input2 b ON a.key <=> b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input1 +POSTHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +100 100 10 100 +100 100 10 100 +100 100 100 100 +100 100 12 100 +35 10035 NULL 35 +NULL 10050 12 NULL +NULL 10050 NULL NULL +NULL 35 12 NULL +NULL 35 NULL NULL +NULL NULL 12 NULL +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input2 b ON a.key <=> b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input1 +PREHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input1 a LEFT OUTER JOIN smb_input2 b ON a.key <=> b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input1 +POSTHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +10 100 NULL NULL +10 100 NULL 
NULL +10 1000 NULL NULL +100 100 10 100 +100 100 10 100 +100 100 100 100 +100 100 12 100 +12 100 NULL NULL +12 NULL NULL NULL +15 10015 NULL NULL +20 10020 NULL NULL +25 10025 NULL NULL +30 10030 NULL NULL +35 10035 NULL 35 +40 10040 NULL NULL +40 10040 NULL NULL +5 10005 NULL NULL +50 10050 NULL NULL +50 10050 NULL NULL +50 10050 NULL NULL +60 10040 NULL NULL +60 10040 NULL NULL +70 10040 NULL NULL +70 10040 NULL NULL +80 10040 NULL NULL +80 10040 NULL NULL +NULL 10050 12 NULL +NULL 10050 NULL NULL +NULL 35 12 NULL +NULL 35 NULL NULL +NULL NULL 12 NULL +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input2 b ON a.key <=> b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input1 +PREHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input1 a RIGHT OUTER JOIN smb_input2 b ON a.key <=> b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input1 +POSTHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +100 100 10 100 +100 100 10 100 +100 100 100 100 +100 100 12 100 +35 10035 NULL 35 +NULL 10050 12 NULL +NULL 10050 NULL NULL +NULL 35 12 NULL +NULL 35 NULL NULL +NULL NULL 10 1000 +NULL NULL 12 NULL +NULL NULL 15 10015 +NULL NULL 20 10020 +NULL NULL 25 10025 +NULL NULL 30 10030 +NULL NULL 35 10035 +NULL NULL 40 10040 +NULL NULL 40 10040 +NULL NULL 5 10005 +NULL NULL 50 10050 +NULL NULL 50 10050 +NULL NULL 50 10050 +NULL NULL 60 10040 +NULL NULL 60 10040 +NULL NULL 70 10040 +NULL NULL 70 10040 +NULL NULL 80 10040 +NULL NULL 80 10040 +NULL NULL NULL 10050 +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 100 100 +10 100 100 100 +10 100 12 100 +10 100 12 100 +10 1000 10 1000 +100 100 10 100 +100 100 10 100 +100 100 100 100 +100 100 12 100 +12 100 10 100 +12 100 10 100 +12 100 100 100 +12 100 12 100 +12 NULL 12 NULL +12 NULL NULL NULL +15 10015 15 10015 +20 10020 20 10020 +25 10025 25 10025 +30 10030 30 10030 +35 10035 35 10035 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 80 10040 +40 10040 80 10040 +40 10040 80 10040 +40 10040 80 10040 +5 10005 5 10005 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 NULL 10050 +50 10050 NULL 10050 +50 10050 NULL 10050 +60 10040 40 10040 +60 10040 40 10040 +60 10040 40 10040 +60 10040 40 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 80 10040 +60 10040 80 10040 +60 10040 80 10040 +60 10040 80 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 60 10040 +70 10040 60 10040 +70 10040 60 10040 +70 10040 60 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 80 10040 
+70 10040 80 10040 +70 10040 80 10040 +70 10040 80 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +NULL 10050 50 10050 +NULL 10050 50 10050 +NULL 10050 50 10050 +NULL 10050 NULL 10050 +NULL 35 NULL 35 +NULL NULL 12 NULL +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a RIGHT OUTER JOIN smb_input2 b ON a.value <=> b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM smb_input2 a RIGHT OUTER JOIN smb_input2 b ON a.value <=> b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 100 100 +10 100 100 100 +10 100 12 100 +10 100 12 100 +10 1000 10 1000 +100 100 10 100 +100 100 10 100 +100 100 100 100 +100 100 12 100 +12 100 10 100 +12 100 10 100 +12 100 100 100 +12 100 12 100 +12 NULL 12 NULL +12 NULL NULL NULL +15 10015 15 10015 +20 10020 20 10020 +25 10025 25 10025 +30 10030 30 10030 +35 10035 35 10035 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 80 10040 +40 10040 80 10040 +40 10040 80 10040 +40 10040 80 10040 +5 10005 5 10005 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 NULL 10050 +50 10050 NULL 10050 +50 10050 NULL 10050 +60 10040 40 10040 +60 10040 40 10040 +60 10040 40 10040 +60 10040 40 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 80 10040 +60 10040 80 10040 +60 10040 80 10040 +60 10040 80 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 60 10040 +70 10040 60 10040 +70 10040 60 10040 +70 10040 60 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 80 10040 +70 10040 80 10040 +70 10040 80 10040 +70 10040 80 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +NULL 10050 50 10050 +NULL 10050 50 10050 +NULL 10050 50 10050 +NULL 10050 NULL 10050 +NULL 35 NULL 35 +NULL NULL 12 NULL +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a JOIN smb_input2 b ON a.value <=> b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 100 100 +10 100 100 100 +10 100 12 100 +10 100 12 100 +10 1000 10 1000 +100 100 10 100 +100 100 10 100 +100 100 100 100 +100 100 12 100 +12 100 10 100 +12 
100 10 100 +12 100 100 100 +12 100 12 100 +12 NULL 12 NULL +12 NULL NULL NULL +15 10015 15 10015 +20 10020 20 10020 +25 10025 25 10025 +30 10030 30 10030 +35 10035 35 10035 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 80 10040 +40 10040 80 10040 +40 10040 80 10040 +40 10040 80 10040 +5 10005 5 10005 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 NULL 10050 +50 10050 NULL 10050 +50 10050 NULL 10050 +60 10040 40 10040 +60 10040 40 10040 +60 10040 40 10040 +60 10040 40 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 80 10040 +60 10040 80 10040 +60 10040 80 10040 +60 10040 80 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 60 10040 +70 10040 60 10040 +70 10040 60 10040 +70 10040 60 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 80 10040 +70 10040 80 10040 +70 10040 80 10040 +70 10040 80 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +NULL 10050 50 10050 +NULL 10050 50 10050 +NULL 10050 50 10050 +NULL 10050 NULL 10050 +NULL 35 NULL 35 +NULL NULL 12 NULL +NULL NULL NULL NULL +PREHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a LEFT OUTER JOIN smb_input2 b ON a.value <=> b.value +PREHOOK: type: QUERY +PREHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT /*+ MAPJOIN(b) */ * FROM smb_input2 a LEFT OUTER JOIN smb_input2 b ON a.value <=> b.value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@smb_input2 +#### A masked pattern was here #### +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 10 100 +10 100 100 100 +10 100 100 100 +10 100 12 100 +10 100 12 100 +10 1000 10 1000 +100 100 10 100 +100 100 10 100 +100 100 100 100 +100 100 12 100 +12 100 10 100 +12 100 10 100 +12 100 100 100 +12 100 12 100 +12 NULL 12 NULL +12 NULL NULL NULL +15 10015 15 10015 +20 10020 20 10020 +25 10025 25 10025 +30 10030 30 10030 +35 10035 35 10035 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 40 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 60 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 70 10040 +40 10040 80 10040 +40 10040 80 10040 +40 10040 80 10040 +40 10040 80 10040 +5 10005 5 10005 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 50 10050 +50 10050 NULL 10050 +50 10050 NULL 10050 +50 10050 NULL 10050 +60 10040 40 10040 +60 10040 40 10040 +60 10040 40 10040 +60 10040 40 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 60 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 70 10040 +60 10040 80 10040 +60 10040 80 10040 +60 10040 80 10040 +60 10040 80 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 40 10040 +70 10040 60 10040 
+70 10040 60 10040 +70 10040 60 10040 +70 10040 60 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 70 10040 +70 10040 80 10040 +70 10040 80 10040 +70 10040 80 10040 +70 10040 80 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 40 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 60 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 70 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +80 10040 80 10040 +NULL 10050 50 10050 +NULL 10050 50 10050 +NULL 10050 50 10050 +NULL 10050 NULL 10050 +NULL 35 NULL 35 +NULL NULL 12 NULL +NULL NULL NULL NULL +PREHOOK: query: --HIVE-3315 join predicate transitive +explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL +PREHOOK: type: QUERY +POSTHOOK: query: --HIVE-3315 join predicate transitive +explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: value is null (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: null (type: void) + sort order: + + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: key (type: int) + Map 3 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 3 Data size: 26 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is null (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: null (type: void) + sort order: + + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + value expressions: value (type: int) + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} + 1 {VALUE._col0} + nullSafes: [true] + outputColumnNames: _col1, _col5 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: null (type: void), _col1 (type: int), _col5 (type: int), null (type: void) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL +PREHOOK: type: QUERY +PREHOOK: Input: default@myinput1 +#### A masked pattern was here #### +POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL +POSTHOOK: type: QUERY +POSTHOOK: Input: default@myinput1 +#### A masked pattern was here #### +NULL 10 10 NULL +NULL 10 48 NULL +NULL 10 NULL NULL +NULL 35 10 NULL +NULL 35 48 
NULL +NULL 35 NULL NULL +NULL NULL 10 NULL +NULL NULL 48 NULL +NULL NULL NULL NULL diff --git a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out index a2f677d..bffcd23 100644 --- a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out +++ b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out @@ -151,112 +151,112 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 #### A masked pattern was here #### -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -45.00 45 -45.00 45 -45.00 45 -45.00 45 -45.00 45 -79.00 79 -79.00 79 -79.00 79 -79.00 79 -79.00 79 -79.00 79 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -6.00 6 -6.00 6 -6.00 6 -6.00 6 -6.00 6 -6.00 6 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +45 45 +45 45 +45 45 +45 45 +45 45 +79 79 +79 79 +79 79 +79 79 +79 79 +79 79 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +6 6 +6 6 +6 6 +6 6 +6 6 +6 6 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY PREHOOK: Input: default@t1 @@ -267,112 +267,112 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 #### A masked pattern was here #### -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -45.00 45 -45.00 45 -45.00 45 -45.00 45 -45.00 45 -79.00 79 -79.00 79 -79.00 79 -79.00 79 -79.00 79 -79.00 79 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -6.00 6 -6.00 6 -6.00 6 -6.00 6 -6.00 6 -6.00 6 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 
9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +45 45 +45 45 +45 45 +45 45 +45 45 +79 79 +79 79 +79 79 +79 79 +79 79 +79 79 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +6 6 +6 6 +6 6 +6 6 +6 6 +6 6 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY PREHOOK: Input: default@t1 @@ -383,109 +383,109 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 #### A masked pattern was here #### -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -9.00 9 -45.00 45 -45.00 45 -45.00 45 -45.00 45 -45.00 45 -79.00 79 -79.00 79 -79.00 79 -79.00 79 -79.00 79 -79.00 79 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -17.00 17 -6.00 6 -6.00 6 -6.00 6 -6.00 6 -6.00 6 -6.00 6 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -62.00 62 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -64.00 64 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -89.00 89 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -70.00 70 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 -14.00 14 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +9 9 +45 45 +45 45 +45 45 +45 45 +45 45 +79 79 +79 79 +79 79 +79 79 +79 79 +79 79 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +17 17 +6 6 +6 6 +6 6 +6 6 +6 6 +6 6 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +62 62 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +64 64 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +89 89 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +70 70 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 +14 14 diff --git a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out index e718b29..b422db5 100644 --- a/ql/src/test/results/clientpositive/tez/orc_analyze.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_analyze.q.out @@ -107,7 +107,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3121 + totalSize 3123 #### A masked pattern was here #### # Storage Information @@ -197,7 +197,7 @@ Table Parameters: numFiles 1 numRows 100 rawDataSize 52600 - totalSize 3121 + totalSize 3123 #### A masked pattern was here #### # Storage Information @@ -313,7 +313,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2024 + totalSize 2026 #### A masked pattern was here #### # Storage Information @@ -358,7 +358,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2043 + totalSize 2045 #### A masked pattern was here #### # Storage Information @@ -460,7 
+460,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2024 + totalSize 2026 #### A masked pattern was here #### # Storage Information @@ -505,7 +505,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2043 + totalSize 2045 #### A masked pattern was here #### # Storage Information @@ -627,7 +627,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2024 + totalSize 2026 #### A masked pattern was here #### # Storage Information @@ -672,7 +672,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2043 + totalSize 2045 #### A masked pattern was here #### # Storage Information @@ -780,7 +780,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2024 + totalSize 2026 #### A masked pattern was here #### # Storage Information @@ -825,7 +825,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 22050 - totalSize 2043 + totalSize 2045 #### A masked pattern was here #### # Storage Information @@ -992,7 +992,7 @@ Partition Parameters: numFiles 1 numRows 50 rawDataSize 21950 - totalSize 2024 + totalSize 2026 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out index 83f8b33..ea2dd5d 100644 --- a/ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out +++ b/ql/src/test/results/clientpositive/tez/orc_merge_incompat1.q.out @@ -145,6 +145,19 @@ POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchem POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] +PREHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: type: QUERY +PREHOOK: Input: default@orc_merge5 +PREHOOK: Output: default@orc_merge5b +POSTHOOK: query: insert into table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@orc_merge5 +POSTHOOK: Output: default@orc_merge5b +POSTHOOK: Lineage: orc_merge5b.decimal1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:decimal1, type:decimal(10,0), comment:null), ] +POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:string1, type:string, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] +POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] PREHOOK: query: -- 5 files total analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY @@ -155,7 +168,7 @@ analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -Found 5 items +Found 6 items #### A masked pattern was here #### PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY @@ -170,6 +183,8 @@ POSTHOOK: Input: default@orc_merge5b 13 bar 80.0 2 
1969-12-31 16:00:05 13 bar 80.0 2 1969-12-31 16:00:05 13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 +2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 @@ -180,6 +195,7 @@ POSTHOOK: Input: default@orc_merge5b 5 eat 0.8 6 1969-12-31 16:00:20 5 eat 0.8 6 1969-12-31 16:00:20 5 eat 0.8 6 1969-12-31 16:00:20 +5 eat 0.8 6 1969-12-31 16:00:20 PREHOOK: query: alter table orc_merge5b concatenate PREHOOK: type: ALTER_TABLE_MERGE PREHOOK: Input: default@orc_merge5b @@ -198,7 +214,7 @@ analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -Found 3 items +Found 4 items #### A masked pattern was here #### PREHOOK: query: select * from orc_merge5b PREHOOK: type: QUERY @@ -213,11 +229,14 @@ POSTHOOK: Input: default@orc_merge5b 13 bar 80.0 2 1969-12-31 16:00:05 13 bar 80.0 2 1969-12-31 16:00:05 13 bar 80.0 2 1969-12-31 16:00:05 +13 bar 80.0 2 1969-12-31 16:00:05 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 2 foo 0.8 1 1969-12-31 16:00:00 +2 foo 0.8 1 1969-12-31 16:00:00 +5 eat 0.8 6 1969-12-31 16:00:20 5 eat 0.8 6 1969-12-31 16:00:20 5 eat 0.8 6 1969-12-31 16:00:20 5 eat 0.8 6 1969-12-31 16:00:20 diff --git a/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out b/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out index 59a6e57..f5bad7f 100644 --- a/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out @@ -180,6 +180,7 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 + nullSafes: [false, true] outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator diff --git a/ql/src/test/results/clientpositive/tez/update_all_types.q.out b/ql/src/test/results/clientpositive/tez/update_all_types.q.out index f1353d0..eba4dde 100644 --- a/ql/src/test/results/clientpositive/tez/update_all_types.q.out +++ b/ql/src/test/results/clientpositive/tez/update_all_types.q.out @@ -87,16 +87,16 @@ POSTHOOK: query: select * from acid_uat order by i POSTHOOK: type: QUERY POSTHOOK: Input: default@acid_uat #### A masked pattern was here #### -11 NULL -1073279343 -1595604468 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true +11 NULL -1073279343 -1595604468 11.0 NULL 11 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true NULL -7382 -1073051226 -1887561756 NULL -7382.0 NULL NULL 1970-01-01 A34p7oRr2WvUJNf A34p7oRr2WvUJNf 4hA4KQj2vD3fI6gX82220d false -11 NULL -1072910839 2048385991 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false +11 NULL -1072910839 2048385991 11.0 NULL 11 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false NULL 8373 -1072081801 1864027286 NULL 8373.0 NULL NULL 1970-01-01 dPkN74F7 dPkN74F7 4KWs6gw7lv2WYd66P true NULL -5470 -1072076362 1864027286 NULL -5470.0 NULL NULL 1970-01-01 2uLyD28144vklju213J1mr 2uLyD28144vklju213J1mr 4KWs6gw7lv2WYd66P true --51 NULL -1071480828 -1401575336 -51.0 NULL -51.0 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true -8 NULL -1071363017 1349676361 8.0 NULL 8.0 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true +-51 NULL -1071480828 -1401575336 -51.0 NULL -51 1969-12-31 16:00:08.451 
NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true +8 NULL -1071363017 1349676361 8.0 NULL 8 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true NULL -741 -1070883071 -1645852809 NULL -741.0 NULL NULL 1970-01-01 0ruyd6Y50JpdGRf6HqD 0ruyd6Y50JpdGRf6HqD xH7445Rals48VOulSyR5F false NULL -947 -1070551679 1864027286 NULL -947.0 NULL NULL 1970-01-01 iUR3Q iUR3Q 4KWs6gw7lv2WYd66P false -11 NULL -1069736047 -453772520 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true +11 NULL -1069736047 -453772520 11.0 NULL 11 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true PREHOOK: query: update acid_uat set ti = 1, si = 2, @@ -141,15 +141,15 @@ POSTHOOK: query: select * from acid_uat order by i POSTHOOK: type: QUERY POSTHOOK: Input: default@acid_uat #### A masked pattern was here #### -11 NULL -1073279343 -1595604468 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true +11 NULL -1073279343 -1595604468 11.0 NULL 11 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true NULL -7382 -1073051226 -1887561756 NULL -7382.0 NULL NULL 1970-01-01 A34p7oRr2WvUJNf A34p7oRr2WvUJNf 4hA4KQj2vD3fI6gX82220d false -11 NULL -1072910839 2048385991 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false +11 NULL -1072910839 2048385991 11.0 NULL 11 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false NULL 8373 -1072081801 1864027286 NULL 8373.0 NULL NULL 1970-01-01 dPkN74F7 dPkN74F7 4KWs6gw7lv2WYd66P true NULL -5470 -1072076362 1864027286 NULL -5470.0 NULL NULL 1970-01-01 2uLyD28144vklju213J1mr 2uLyD28144vklju213J1mr 4KWs6gw7lv2WYd66P true --51 NULL -1071480828 -1401575336 -51.0 NULL -51.0 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true -8 NULL -1071363017 1349676361 8.0 NULL 8.0 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true +-51 NULL -1071480828 -1401575336 -51.0 NULL -51 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true +8 NULL -1071363017 1349676361 8.0 NULL 8 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true NULL -947 -1070551679 1864027286 NULL -947.0 NULL NULL 1970-01-01 iUR3Q iUR3Q 4KWs6gw7lv2WYd66P false -11 NULL -1069736047 -453772520 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true +11 NULL -1069736047 -453772520 11.0 NULL 11 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true 1 2 3 4 3.14 6.28 5.99 NULL 2014-09-01 its a beautiful day in the neighbhorhood a beautiful day for a neighbor wont you be mine true PREHOOK: query: update acid_uat set ti = ti * 2, @@ -175,13 +175,13 @@ POSTHOOK: query: select * from acid_uat order by i POSTHOOK: type: QUERY POSTHOOK: Input: default@acid_uat #### A masked pattern was here #### -11 NULL -1073279343 -1595604468 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true +11 NULL -1073279343 -1595604468 11.0 NULL 11 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true NULL -7382 -1073051226 -1887561756 NULL -7382.0 NULL NULL 1970-01-01 A34p7oRr2WvUJNf A34p7oRr2WvUJNf 4hA4KQj2vD3fI6gX82220d false -11 NULL -1072910839 2048385991 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false +11 NULL -1072910839 2048385991 11.0 NULL 11 1969-12-31 16:00:02.351 NULL 0iqrc5 
0iqrc5 KbaDXiN85adbHRx58v false NULL 8373 -1072081801 1864027286 NULL 8373.0 NULL NULL 1970-01-01 dPkN74F7 dPkN74F7 4KWs6gw7lv2WYd66P true NULL -5470 -1072076362 1864027286 NULL -5470.0 NULL NULL 1970-01-01 2uLyD28144vklju213J1mr 2uLyD28144vklju213J1mr 4KWs6gw7lv2WYd66P true --102 -51 -1071480828 -1401575336 -51.0 -51.0 -51.0 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true -8 NULL -1071363017 1349676361 8.0 NULL 8.0 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true +-102 -51 -1071480828 -1401575336 -51.0 -51.0 -51 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true +8 NULL -1071363017 1349676361 8.0 NULL 8 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true NULL -947 -1070551679 1864027286 NULL -947.0 NULL NULL 1970-01-01 iUR3Q iUR3Q 4KWs6gw7lv2WYd66P false -11 NULL -1069736047 -453772520 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true +11 NULL -1069736047 -453772520 11.0 NULL 11 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true 1 2 3 4 3.14 6.28 5.99 NULL 2014-09-01 its a beautiful day in the neighbhorhood a beautiful day for a neighbor wont you be mine true diff --git a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out index ad8c4c0..bf97900 100644 --- a/ql/src/test/results/clientpositive/tez/vector_between_in.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_between_in.q.out @@ -699,19 +699,19 @@ POSTHOOK: Input: default@decimal_date_test -18.5162162162 -17.3216216216 -16.7243243243 --16.1270270270 +-16.127027027 -15.5297297297 -10.7513513514 -9.5567567568 -8.3621621622 --5.9729729730 +-5.972972973 -3.5837837838 4.1810810811 4.7783783784 4.7783783784 5.3756756757 -5.9729729730 -5.9729729730 +5.972972973 +5.972972973 11.3486486486 11.3486486486 11.9459459459 diff --git a/ql/src/test/results/clientpositive/tez/vector_data_types.q.out b/ql/src/test/results/clientpositive/tez/vector_data_types.q.out index 3f38a45..eca2cbc 100644 --- a/ql/src/test/results/clientpositive/tez/vector_data_types.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_data_types.q.out @@ -157,7 +157,7 @@ POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc OR POSTHOOK: type: QUERY POSTHOOK: Input: default@over1korc #### A masked pattern was here #### -108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.70 undecided +108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.7 undecided 118 497 65536 4294967381 50.32 12.72 false david nixon 2013-03-01 09:11:58.703285 83.48 values clariffication 18 280 65536 4294967320 32.92 45.94 false holly white 2013-03-01 09:11:58.703086 58.86 topology 69 489 65536 4294967404 33.52 17.99 false oscar ichabod 2013-03-01 09:11:58.703247 32.68 topology @@ -250,7 +250,7 @@ POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc OR POSTHOOK: type: QUERY POSTHOOK: Input: default@over1korc #### A masked pattern was here #### -108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.70 undecided +108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.7 undecided 118 497 65536 4294967381 50.32 12.72 false david nixon 2013-03-01 09:11:58.703285 83.48 values clariffication 18 280 65536 4294967320 32.92 45.94 false 
holly white 2013-03-01 09:11:58.703086 58.86 topology 69 489 65536 4294967404 33.52 17.99 false oscar ichabod 2013-03-01 09:11:58.703247 32.68 topology diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out index 8974bb8..6f2b221 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out @@ -53,25 +53,25 @@ STAGE PLANS: Map Operator Tree: TableScan alias: decimal - Statistics: Num rows: 1 Data size: 219 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: dec (type: decimal(10,0)) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 219 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: decimal(10,0)) sort order: + - Statistics: Num rows: 1 Data size: 219 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE Execution mode: vectorized Reducer 2 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: decimal(10,0)) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 219 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 219 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out index af9459f..ba84822 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_2.q.out @@ -1605,7 +1605,7 @@ POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_2 #### A masked pattern was here #### -1.0 +1 PREHOOK: query: explain select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out index 7987d08..75f872e 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_3.q.out @@ -47,7 +47,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -56,7 +56,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.000000000000000000 0 +0 0 0 0 0 0 0.01 0 @@ -67,8 +67,8 @@ NULL 0 0.33 0 0.333 0 1 1 -1.0 1 -1.000000000000000000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 @@ -76,14 +76,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -92,14 
+92,14 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 200 200 125.2 125 -124.00 124 +124 124 100 100 20 20 10 10 -3.140 4 +3.14 4 3.14 3 3.14 3 3.14 3 @@ -107,8 +107,8 @@ POSTHOOK: Input: default@decimal_3 2 2 1.122 1 1.12 1 -1.000000000000000000 1 -1.0 1 +1 1 +1 1 1 1 0.333 0 0.33 0 @@ -119,7 +119,7 @@ POSTHOOK: Input: default@decimal_3 0.01 0 0 0 0 0 -0.000000000000000000 0 +0 0 -0.3 0 -0.33 0 -0.333 0 @@ -128,7 +128,7 @@ POSTHOOK: Input: default@decimal_3 -1.122 -11 -1255.49 -1255 -4400 4400 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 NULL 0 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value PREHOOK: type: QUERY @@ -139,7 +139,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -148,7 +148,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.000000000000000000 0 +0 0 0 0 0 0 0.01 0 @@ -159,8 +159,8 @@ NULL 0 0.33 0 0.333 0 1 1 -1.0 1 -1.000000000000000000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 @@ -168,14 +168,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -185,7 +185,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL --1234567890.1234567890 +-1234567890.123456789 -4400 -1255.49 -1.122 @@ -193,7 +193,7 @@ NULL -0.333 -0.33 -0.3 -0.000000000000000000 +0 0.01 0.02 0.1 @@ -209,10 +209,10 @@ NULL 10 20 100 -124.00 +124 125.2 200 -1234567890.1234567800 +1234567890.12345678 PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -222,7 +222,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -230,7 +230,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.000000000000000000 0 +0 0 0.01 0 0.02 0 0.1 0 @@ -246,10 +246,10 @@ NULL 0 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -258,23 +258,23 @@ POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY v POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### --1234567890 -1234567890.1234567890 +-1234567890 -1234567890.123456789 -1255 -1255.49 -11 -1.122 -1 -2.24 -0 0.330000000000000000 -1 5.242000000000000000 +0 0.33 +1 5.242 2 4 3 9.42 -4 3.140 +4 3.14 10 10 20 20 100 100 -124 124.00 +124 124 125 125.2 200 200 4400 -4400 -1234567890 1234567890.1234567800 +1234567890 1234567890.12345678 PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -283,7 +283,7 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O POSTHOOK: type: QUERY POSTHOOK: Input: 
default@decimal_3 #### A masked pattern was here #### --1234567890.1234567890 -1234567890 -1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -1234567890.123456789 -1234567890 -4400 4400 -4400 4400 -1255.49 -1255 -1255.49 -1255 -1.122 -11 -1.122 -11 @@ -294,13 +294,13 @@ POSTHOOK: Input: default@decimal_3 -0.333 0 -0.333 0 -0.33 0 -0.33 0 -0.3 0 -0.3 0 -0.000000000000000000 0 0.000000000000000000 0 -0.000000000000000000 0 0 0 -0.000000000000000000 0 0 0 0 0 0 0 -0 0 0.000000000000000000 0 0 0 0 0 -0 0 0.000000000000000000 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0.01 0 0.01 0 @@ -311,14 +311,14 @@ POSTHOOK: Input: default@decimal_3 0.33 0 0.33 0 0.333 0 0.333 0 1 1 1 1 -1 1 1.0 1 -1 1 1.000000000000000000 1 -1.0 1 1.000000000000000000 1 -1.0 1 1.0 1 -1.0 1 1 1 -1.000000000000000000 1 1.000000000000000000 1 -1.000000000000000000 1 1 1 -1.000000000000000000 1 1.0 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 1.12 1 1.12 1 1.122 1 1.122 1 2 2 2 2 @@ -334,20 +334,20 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 3.14 3 3.14 3 3.14 3 -3.14 3 3.140 4 -3.14 3 3.140 4 -3.14 3 3.140 4 -3.140 4 3.14 3 -3.140 4 3.14 3 -3.140 4 3.14 3 -3.140 4 3.140 4 +3.14 3 3.14 4 +3.14 3 3.14 4 +3.14 3 3.14 4 +3.14 4 3.14 3 +3.14 4 3.14 3 +3.14 4 3.14 3 +3.14 4 3.14 4 10 10 10 10 20 20 20 20 100 100 100 100 -124.00 124 124.00 124 +124 124 124 124 125.2 125 125.2 125 200 200 200 200 -1234567890.1234567800 1234567890 1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 1234567890.12345678 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -359,7 +359,7 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -371,7 +371,7 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 PREHOOK: query: DROP TABLE DECIMAL_3_txt PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_3_txt diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out index 483ae1f..613f5a8 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_4.q.out @@ -57,7 +57,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_1 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -66,7 +66,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.0000000000000000000000000 0 +0 0 0 0 0 0 0.01 0 @@ -78,7 +78,7 @@ NULL 0 0.333 0 0.9999999999999999999999999 1 1 1 -1.0 1 +1 1 1.12 1 1.122 1 2 2 @@ -86,14 +86,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_4_2 @@ -103,7 +103,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_2 #### A masked pattern was here #### NULL NULL --1234567890.1234567890 -3703703670.3703703670 +-1234567890.123456789 -3703703670.370370367 -4400 -13200 -1255.49 -3766.47 -1.122 -3.366 @@ -112,7 +112,7 @@ NULL NULL -0.333 -0.999 -0.33 -0.99 -0.3 -0.9 -0.0000000000000000000000000 
0.0000000000000000000000000 +0 0 0 0 0 0 0.01 0.03 @@ -124,7 +124,7 @@ NULL NULL 0.333 0.999 0.9999999999999999999999999 2.9999999999999999999999997 1 3 -1.0 3.0 +1 3 1.12 3.36 1.122 3.366 2 6 @@ -132,14 +132,14 @@ NULL NULL 3.14 9.42 3.14 9.42 3.14 9.42 -3.140 9.420 +3.14 9.42 10 30 20 60 100 300 -124.00 372.00 +124 372 125.2 375.6 200 600 -1234567890.1234567800 3703703670.3703703400 +1234567890.12345678 3703703670.37037034 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_4_2 @@ -149,7 +149,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_2 #### A masked pattern was here #### NULL NULL --1234567890.1234567890 -3703703670.3703703670 +-1234567890.123456789 -3703703670.370370367 -4400 -13200 -1255.49 -3766.47 -1.122 -3.366 @@ -158,7 +158,7 @@ NULL NULL -0.333 -0.999 -0.33 -0.99 -0.3 -0.9 -0.0000000000000000000000000 0.0000000000000000000000000 +0 0 0 0 0 0 0.01 0.03 @@ -170,7 +170,7 @@ NULL NULL 0.333 0.999 0.9999999999999999999999999 2.9999999999999999999999997 1 3 -1.0 3.0 +1 3 1.12 3.36 1.122 3.366 2 6 @@ -178,14 +178,14 @@ NULL NULL 3.14 9.42 3.14 9.42 3.14 9.42 -3.140 9.420 +3.14 9.42 10 30 20 60 100 300 -124.00 372.00 +124 372 125.2 375.6 200 600 -1234567890.1234567800 3703703670.3703703400 +1234567890.12345678 3703703670.37037034 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_4_2 @@ -195,7 +195,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_2 #### A masked pattern was here #### NULL NULL --1234567890.1234567890 -3703703670.3703703670 +-1234567890.123456789 -3703703670.370370367 -4400 -13200 -1255.49 -3766.47 -1.122 -3.366 @@ -204,7 +204,7 @@ NULL NULL -0.333 -0.999 -0.33 -0.99 -0.3 -0.9 -0.0000000000000000000000000 0.0000000000000000000000000 +0 0 0 0 0 0 0.01 0.03 @@ -216,7 +216,7 @@ NULL NULL 0.333 0.999 0.9999999999999999999999999 2.9999999999999999999999997 1 3 -1.0 3.0 +1 3 1.12 3.36 1.122 3.366 2 6 @@ -224,14 +224,14 @@ NULL NULL 3.14 9.42 3.14 9.42 3.14 9.42 -3.140 9.420 +3.14 9.42 10 30 20 60 100 300 -124.00 372.00 +124 372 125.2 375.6 200 600 -1234567890.1234567800 3703703670.3703703400 +1234567890.12345678 3703703670.37037034 PREHOOK: query: DROP TABLE DECIMAL_4_1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_4_1 diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out index 01b5f42..34c3351 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_5.q.out @@ -67,7 +67,7 @@ NULL -0.333 -0.33 -0.3 -0.00000 +0 0 0 0.01 @@ -78,8 +78,8 @@ NULL 0.33 0.333 1 -1.0 -1.00000 +1 +1 1.12 1.122 2 @@ -87,11 +87,11 @@ NULL 3.14 3.14 3.14 -3.140 +3.14 10 20 100 -124.00 +124 125.2 200 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key @@ -110,7 +110,7 @@ NULL -0.333 -0.33 -0.3 -0.00000 +0 0.01 0.02 0.1 @@ -126,7 +126,7 @@ NULL 10 20 100 -124.00 +124 125.2 200 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5 @@ -185,7 +185,7 @@ POSTHOOK: Input: default@decimal_5 #### A masked pattern was here #### NULL NULL -0.000 +0 0 100 10 @@ -204,7 +204,7 @@ NULL -0.3 -0.33 -0.333 -1.0 +1 2 3.14 -1.12 @@ -212,13 +212,13 @@ NULL -1.122 1.12 1.122 -124.00 +124 125.2 NULL 3.14 3.14 -3.140 -1.000 +3.14 +1 NULL NULL PREHOOK: query: DROP TABLE DECIMAL_5_txt diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out 
b/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out index 7ecd500..9cdd7fc 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_6.q.out @@ -125,20 +125,20 @@ NULL 1234567890 -1.12 -1 -0.333 0 -0.3 0 -0.00000 0 +0 0 0 0 0.333 0 -1.0 1 -1.00000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 10.73433 5 -124.00 124 +124 124 125.2 125 23232.23435 2 PREHOOK: query: SELECT * FROM DECIMAL_6_2 ORDER BY key, value @@ -157,20 +157,20 @@ NULL 0 -1.12 -1 -0.333 0 -0.3 0 -0.0000 0 +0 0 0 0 0.333 0 -1.0 1 -1.0000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 10.7343 5 -124.00 124 +124 124 125.2 125 23232.2344 2 2389432.2375 3 @@ -213,16 +213,16 @@ NULL -0.333 -0.3 -0.3 -0.00000 -0.0000 +0 +0 0 0 0.333 0.333 -1.0 -1.0 -1.0000 -1.00000 +1 +1 +1 +1 1.12 1.12 1.122 @@ -233,14 +233,14 @@ NULL 3.14 3.14 3.14 -3.140 -3.140 +3.14 +3.14 10 10 10.7343 10.73433 -124.00 -124.00 +124 +124 125.2 125.2 23232.23435 diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out index e4b2c56..0037e2a 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out @@ -115,9 +115,9 @@ POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 3072 11160.715384615385 -5147.907692307693 6010604.3076923073536 -3728 6 5831542.269248378 -3367.6517567568 5817556.0411483778 6 6984454.211097692 -4033.445769230769 6967702.8672438458471 --563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 +-563 2 -515.621072973 -3367.6517567568 -3883.2728297298 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 762 2 5831542.269248378 1531.2194054054 5833073.4886537834 2 6984454.211097692 1833.9456923076925 6986288.1567899996925 -6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 +6981 3 5831542.269248378 -515.621072973 5830511.027102432 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 1024 11697.969230769231 -11712.99230769231 -416182.64030769233089 528534767 1024 5831542.269248378 -9777.1594594595 11646372.8607481068 1024 6984454.211097692 -11710.130769230771 13948892.79980307629003 626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 1024 11645.746153846154 -11712.276923076923 12625.04759999997746 @@ -219,9 +219,9 @@ POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 1633.60810810806667 5695.483082135364 5696.4103077145055 3072 11160.715384615385 -5147.907692307693 6010604.3076923073536 1956.576923076922966667 6821.495748565159 6822.606289190924 -3728 6 5831542.269248378 -3367.6517567568 5817556.0411483778 969592.67352472963333 2174330.2092403853 2381859.406131774 6 6984454.211097692 -4033.445769230769 6967702.8672438458471 1161283.811207307641183333 2604201.2704476737 2852759.5602156054 --563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 -2325.50327307692295 1707.9424961538462 2415.395441814127 +-563 2 -515.621072973 
-3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 -2325.50327307692295 1707.9424961538462 2415.395441814127 762 2 5831542.269248378 1531.2194054054 5833073.4886537834 2916536.7443268917 2915005.5249214866 4122440.3477364695 2 6984454.211097692 1833.9456923076925 6986288.1567899996925 3493144.07839499984625 3491310.1327026924 4937458.140118758 -6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 +6981 3 5831542.269248378 -515.621072973 5830511.027102432 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 -339.33992366976309 5708.9563478862 5711.745967572779 1024 11697.969230769231 -11712.99230769231 -416182.64030769233089 -406.428359675480791885 6837.632716002934 6840.973851172274 528534767 1024 5831542.269248378 -9777.1594594595 11646372.8607481068 11373.41099682432305 257528.92988206653 257654.7686043977 1024 6984454.211097692 -11710.130769230771 13948892.79980307629003 13621.965624807691689482 308443.1074570801 308593.82484083984 626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 10.29399661106318 5742.09145323734 5744.897264034267 1024 11645.746153846154 -11712.276923076923 12625.04759999997746 12.329148046874977988 6877.318722794877 6880.679250101603 diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out index e2fe3a2..59b80f2 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_cast.q.out @@ -29,13 +29,13 @@ POSTHOOK: query: SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS D POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### --13326.0 528534767 true 1969-12-31 15:59:46.674 -13326.0 528534767 1 -13 --15813.0 528534767 true 1969-12-31 15:59:55.787 -15813.0 528534767 1 -4 --9566.0 528534767 true 1969-12-31 15:59:44.187 -9566.0 528534767 1 -16 -15007.0 528534767 true 1969-12-31 15:59:50.434 15007.0 528534767 1 -10 -7021.0 528534767 true 1969-12-31 16:00:15.007 7021.0 528534767 1 15 -4963.0 528534767 true 1969-12-31 16:00:07.021 4963.0 528534767 1 7 --7824.0 528534767 true 1969-12-31 16:00:04.963 -7824.0 528534767 1 5 --15431.0 528534767 true 1969-12-31 15:59:52.176 -15431.0 528534767 1 -8 --15549.0 528534767 true 1969-12-31 15:59:44.569 -15549.0 528534767 1 -15 -5780.0 528534767 true 1969-12-31 15:59:44.451 5780.0 528534767 1 -16 +-13326.0 528534767 true 1969-12-31 15:59:46.674 -13326 528534767 1 -13 +-15813.0 528534767 true 1969-12-31 15:59:55.787 -15813 528534767 1 -4 +-9566.0 528534767 true 1969-12-31 15:59:44.187 -9566 528534767 1 -16 +15007.0 528534767 true 1969-12-31 15:59:50.434 15007 528534767 1 -10 +7021.0 528534767 true 1969-12-31 16:00:15.007 7021 528534767 1 15 +4963.0 528534767 true 1969-12-31 16:00:07.021 4963 528534767 1 7 +-7824.0 528534767 true 1969-12-31 16:00:04.963 -7824 528534767 1 5 +-15431.0 528534767 true 1969-12-31 15:59:52.176 -15431 528534767 1 -8 +-15549.0 528534767 true 1969-12-31 15:59:44.569 -15549 528534767 1 -15 +5780.0 528534767 
true 1969-12-31 15:59:44.451 5780 528534767 1 -16 diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out index 7faa630..a1e95e8 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out @@ -40,12 +40,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_test #### A masked pattern was here #### 19699.417463617423 -12507.913305613346 0.8351496686995997 2.8303425077026896E7 3.6405405405 8963 10735 -17 8963 true 10735.776923076923 8963.641 10735.776923076923 1969-12-31 18:29:23.64054054 -9216.339708939685 -5851.806444906470 0.8353975893550668 6195112.1797296945 3.6243243243 4193 5022 -98 4193 true 5022.715384615385 4193.6245 5022.715384615385 1969-12-31 17:09:53.624324324 +9216.339708939685 -5851.80644490647 0.8353975893550668 6195112.1797296945 3.6243243243 4193 5022 -98 4193 true 5022.715384615385 4193.6245 5022.715384615385 1969-12-31 17:09:53.624324324 6514.8403326403464 -4136.5212058211928 0.8355907765708067 3095563.9418919063 4.3864864865 2964 3550 -34 2964 true 3550.4538461538464 2964.3865 3550.4538461538464 1969-12-31 16:49:24.386486486 7587.301455301477 -4817.467775467754 0.8354976172734904 4198623.24324327 2.3783783784 3452 4134 38 3452 true 4134.923076923077 3452.3784 4134.923076923077 1969-12-31 16:57:32.378378378 -19197.9729729730 -12189.5270270270 0.835155361813429 2.6880848817567654E7 5.4729729730 8735 10462 -34 8735 true 10462.5 8735.473 10462.5 1969-12-31 18:25:35.472972973 +19197.972972973 -12189.527027027 0.835155361813429 2.6880848817567654E7 5.472972973 8735 10462 -34 8735 true 10462.5 8735.473 10462.5 1969-12-31 18:25:35.472972973 17098.9945945946 -10856.8054054054 0.8351828165813104 2.132423090270272E7 0.3945945946 7780 9318 102 7780 true 9318.6 7780.3945 9318.6 1969-12-31 18:09:40.394594594 12433.723076923077 -7894.646153846154 0.8352770361086894 1.12754688E7 7.6 5657 6776 120 5657 true 6776.123076923077 5657.6 6776.123076923077 1969-12-31 17:34:17.6 7247.316839916862 -4601.598544698524 0.8355241651897876 3830775.6932432684 7.6783783784 3297 3949 109 3297 true 3949.638461538462 3297.6785 3949.638461538462 1969-12-31 16:54:57.678378378 -14757.1700623700465 -9369.8914760914930 0.8352226654922171 1.5883214124324286E7 4.8162162162 6714 8042 106 6714 true 8042.3538461538465 6714.8164 8042.3538461538465 1969-12-31 17:51:54.816216216 +14757.1700623700465 -9369.891476091493 0.8352226654922171 1.5883214124324286E7 4.8162162162 6714 8042 106 6714 true 8042.3538461538465 6714.8164 8042.3538461538465 1969-12-31 17:51:54.816216216 10964.832016631993 -6961.991060291086 0.8353232978714221 8768719.779729689 9.2243243243 4989 5975 87 4989 true 5975.607692307693 4989.224 5975.607692307693 1969-12-31 17:23:09.224324324 diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out index 6a5ccc6..049be26 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_mapjoin.q.out @@ -109,26 +109,26 @@ POSTHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_mapjoin #### A masked pattern was here #### -6981 6981 -515.6210729730 -617.5607769230769 -6981 6981 -515.6210729730 -617.5607769230769 -6981 6981 -515.6210729730 -617.5607769230769 -6981 6981 
-515.6210729730 -617.5607769230769 -6981 6981 -515.6210729730 6984454.211097692 -6981 6981 -515.6210729730 6984454.211097692 -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL +6981 6981 -515.621072973 -617.5607769230769 +6981 6981 -515.621072973 -617.5607769230769 +6981 6981 -515.621072973 -617.5607769230769 +6981 6981 -515.621072973 -617.5607769230769 +6981 6981 -515.621072973 6984454.211097692 +6981 6981 -515.621072973 6984454.211097692 +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL 6981 6981 5831542.269248378 -617.5607769230769 6981 6981 5831542.269248378 -617.5607769230769 6981 6981 5831542.269248378 6984454.211097692 diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out index d155623..3d9d89a 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_precision.q.out @@ -99,13 +99,13 @@ NULL NULL NULL NULL -0.0000000000 -0.0000000000 -0.0000000000 -0.0000000000 0 -0.1234567890 -0.1234567890 +0 +0 +0 +0 +0.123456789 +0.123456789 1.2345678901 1.2345678901 1.2345678901 @@ -129,7 +129,7 @@ NULL 123456789.0123456 123456789.0123456789 1234567890.123456 -1234567890.1234567890 +1234567890.123456789 PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -182,13 +182,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 1.0000000000 -1.0000000000 -0.0000000000 1.0000000000 -1.0000000000 -0.0000000000 1.0000000000 -1.0000000000 -0.0000000000 1.0000000000 -1.0000000000 0 1 -1 -0.1234567890 1.1234567890 -0.8765432110 -0.1234567890 1.1234567890 -0.8765432110 +0 1 -1 +0 1 -1 +0 1 -1 +0 1 -1 +0.123456789 1.123456789 -0.876543211 +0.123456789 1.123456789 -0.876543211 1.2345678901 2.2345678901 0.2345678901 1.2345678901 2.2345678901 0.2345678901 1.2345678901 2.2345678901 0.2345678901 @@ -212,7 +212,7 @@ NULL NULL NULL 123456789.0123456 123456790.0123456 123456788.0123456 123456789.0123456789 123456790.0123456789 123456788.0123456789 1234567890.123456 1234567891.123456 1234567889.123456 -1234567890.1234567890 1234567891.1234567890 1234567889.1234567890 +1234567890.123456789 1234567891.123456789 1234567889.123456789 PREHOOK: query: SELECT dec, dec * 2, dec / 3 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -265,13 +265,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0.0000000000 0 -0.0000000000 0.0000000000 0 -0.0000000000 0.0000000000 0 -0.0000000000 0.0000000000 0 0 0 0 -0.1234567890 0.2469135780 0.041152263 -0.1234567890 0.2469135780 0.041152263 +0 0 
0 +0 0 0 +0 0 0 +0 0 0 +0.123456789 0.246913578 0.041152263 +0.123456789 0.246913578 0.041152263 1.2345678901 2.4691357802 0.411522630033 1.2345678901 2.4691357802 0.411522630033 1.2345678901 2.4691357802 0.411522630033 @@ -281,9 +281,9 @@ NULL NULL NULL 123.4567890123 246.9135780246 41.1522630041 123.4567890123 246.9135780246 41.1522630041 123.4567890123 246.9135780246 41.1522630041 -1234.5678901235 2469.1357802470 411.522630041167 -1234.5678901235 2469.1357802470 411.522630041167 -1234.5678901235 2469.1357802470 411.522630041167 +1234.5678901235 2469.135780247 411.522630041167 +1234.5678901235 2469.135780247 411.522630041167 +1234.5678901235 2469.135780247 411.522630041167 12345.6789012346 24691.3578024692 4115.226300411533 12345.6789012346 24691.3578024692 4115.226300411533 123456.7890123456 246913.5780246912 41152.2630041152 @@ -295,7 +295,7 @@ NULL NULL NULL 123456789.0123456 246913578.0246912 41152263.0041152 123456789.0123456789 246913578.0246913578 41152263.0041152263 1234567890.123456 2469135780.246912 411522630.041152 -1234567890.1234567890 2469135780.2469135780 411522630.041152263 +1234567890.123456789 2469135780.246913578 411522630.041152263 PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -348,13 +348,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0 -0.0000000000 0 -0.0000000000 0 -0.0000000000 0 0 0 -0.1234567890 0.013717421 -0.1234567890 0.013717421 +0 0 +0 0 +0 0 +0 0 +0.123456789 0.013717421 +0.123456789 0.013717421 1.2345678901 0.137174210011 1.2345678901 0.137174210011 1.2345678901 0.137174210011 @@ -378,7 +378,7 @@ NULL NULL 123456789.0123456 13717421.001371733333 123456789.0123456789 13717421.0013717421 1234567890.123456 137174210.013717333333 -1234567890.1234567890 137174210.013717421 +1234567890.123456789 137174210.013717421 PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -431,13 +431,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0 -0.0000000000 0 -0.0000000000 0 -0.0000000000 0 0 0 -0.1234567890 0.0045724736667 -0.1234567890 0.0045724736667 +0 0 +0 0 +0 0 +0 0 +0.123456789 0.0045724736667 +0.123456789 0.0045724736667 1.2345678901 0.0457247366704 1.2345678901 0.0457247366704 1.2345678901 0.0457247366704 @@ -461,7 +461,7 @@ NULL NULL 123456789.0123456 4572473.6671239111111 123456789.0123456789 4572473.6671239140333 1234567890.123456 45724736.6712391111111 -1234567890.1234567890 45724736.6712391403333 +1234567890.123456789 45724736.6712391403333 PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -514,13 +514,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0.00000000000000000000 -0.0000000000 0.00000000000000000000 -0.0000000000 0.00000000000000000000 -0.0000000000 0.00000000000000000000 0 0 -0.1234567890 0.01524157875019052100 -0.1234567890 0.01524157875019052100 +0 0 +0 0 +0 0 +0 0 +0.123456789 0.015241578750190521 +0.123456789 0.015241578750190521 1.2345678901 1.52415787526596567801 1.2345678901 1.52415787526596567801 1.2345678901 1.52415787526596567801 @@ -544,7 +544,7 @@ NULL NULL 123456789.0123456 15241578753238817.26870921383936 123456789.0123456789 15241578753238836.75019051998750190521 1234567890.123456 NULL -1234567890.1234567890 NULL +1234567890.123456789 NULL PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION PREHOOK: type: QUERY 
POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out index cb0b5a2..ffdb1c9 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_trailing.q.out @@ -76,13 +76,13 @@ POSTHOOK: Input: default@decimal_trailing 0 0 0 1 0 0 2 NULL NULL -3 1.0000 1.00000000 -4 10.0000 10.00000000 -5 100.0000 100.00000000 -6 1000.0000 1000.00000000 -7 10000.0000 10000.00000000 -8 100000.0000 100000.00000000 -9 NULL 1000000.00000000 +3 1 1 +4 10 10 +5 100 100 +6 1000 1000 +7 10000 10000 +8 100000 100000 +9 NULL 1000000 10 NULL NULL 11 NULL NULL 12 NULL NULL @@ -91,18 +91,18 @@ POSTHOOK: Input: default@decimal_trailing 15 NULL NULL 16 NULL NULL 17 NULL NULL -18 1.0000 1.00000000 -19 10.000 10.0000000 -20 100.00 100.000000 -21 1000.0 1000.00000 -22 100000 10000.0000 -23 0.0000 0.00000000 -24 0.000 0.0000000 -25 0.00 0.000000 -26 0.0 0.00000 -27 0 0.00000 -28 12313.2000 134134.31252500 -29 99999.9990 134134.31242553 +18 1 1 +19 10 10 +20 100 100 +21 1000 1000 +22 100000 10000 +23 0 0 +24 0 0 +25 0 0 +26 0 0 +27 0 0 +28 12313.2 134134.312525 +29 99999.999 134134.31242553 PREHOOK: query: DROP TABLE DECIMAL_TRAILING_txt PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_trailing_txt diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out index 8e847f6..266b4cb 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out @@ -97,7 +97,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -8800 NULL -0.0000000000 +0 0 200 20 @@ -116,7 +116,7 @@ NULL -0.6 -0.66 -0.666 -2.0 +2 4 6.28 -2.24 @@ -124,15 +124,15 @@ NULL -2.244 2.24 2.244 -248.00 +248 250.4 -2510.98 6.28 6.28 -6.280 -2.0000000000 --2469135780.2469135780 -2469135780.2469135600 +6.28 +2 +-2469135780.246913578 +2469135780.24691356 PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF @@ -180,7 +180,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 0 NULL -0.0000000000 +0 0 200 20 @@ -199,7 +199,7 @@ NULL -0.3 -0.33 -0.333 -2.0 +2 4 6.14 -2.12 @@ -207,15 +207,15 @@ NULL -12.122 2.12 2.122 -248.00 +248 250.2 -2510.49 6.14 6.14 -7.140 -2.0000000000 --2469135780.1234567890 -2469135780.1234567800 +7.14 +2 +-2469135780.123456789 +2469135780.12345678 PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF @@ -430,42 +430,42 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 0 NULL -0.0000000000 0 0 0 0 -0.0 -0.00 0 0 0 0 -0.0 -0.00 -0.0 -0.00 -0.000 -0.0 -0.00 -0.000 -0.0 0 -0.00 -0.00 -0.00 -0.000 -0.00 -0.000 -0.00 -0.0 -0.00 -0.00 -0.00 -0.000 -0.0000000000 -0.0000000000 -0.0000000000 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF @@ -513,7 +513,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -8800 NULL -0.0000000000 +0 0 0 0 @@ -532,7 +532,7 @@ NULL -0.3 -0.33 -0.333 -0.0 +0 0 
0.14 -0.12 @@ -540,15 +540,15 @@ NULL 9.878 0.12 0.122 -0.00 +0 0.2 -0.49 0.14 0.14 --0.860 -0.0000000000 --0.1234567890 -0.1234567800 +-0.86 +0 +-0.123456789 +0.12345678 PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF @@ -763,7 +763,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 19360000 NULL -0.00000000000000000000 +0 0 10000 100 @@ -782,7 +782,7 @@ NULL 0.09 0.1089 0.110889 -1.00 +1 4 9.8596 1.2544 @@ -790,13 +790,13 @@ NULL 1.258884 1.2544 1.258884 -15376.0000 +15376 15675.04 1576255.1401 9.8596 9.8596 -9.859600 -1.00000000000000000000 +9.8596 +1 NULL NULL PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0 @@ -853,7 +853,7 @@ POSTHOOK: Input: default@decimal_udf 200 200 20 20 2 2 -1.0 1 +1 1 2 2 3.14 3 -1.12 -1 @@ -861,15 +861,15 @@ POSTHOOK: Input: default@decimal_udf -1.122 -11 1.12 1 1.122 1 -124.00 124 +124 124 125.2 125 -1255.49 -1255 3.14 3 3.14 3 -3.140 4 -1.0000000000 1 --1234567890.1234567890 -1234567890 -1234567890.1234567800 1234567890 +3.14 4 +1 1 +-1234567890.123456789 -1234567890 +1234567890.12345678 1234567890 PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF @@ -917,26 +917,26 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -19360000 NULL -0.0000000000 +0 0 10000 100 1 -0.0 -0.00 +0 +0 40000 400 4 0 -0.0 -0.00 -0.0 -0.00 -0.000 -0.0 -0.00 -0.000 -1.0 +0 +0 +0 +0 +0 +0 +0 +0 +1 4 9.42 1.12 @@ -944,15 +944,15 @@ NULL 12.342 1.12 1.122 -15376.00 -15650.0 +15376 +15650 1575639.95 9.42 9.42 -12.560 -1.0000000000 -1524157875171467887.5019052100 -1524157875171467876.3907942000 +12.56 +1 +1524157875171467887.50190521 +1524157875171467876.3907942 PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF @@ -1370,7 +1370,7 @@ POSTHOOK: Input: default@decimal_udf 0.785 1 1.0000000001 -1.000000000099999992710 +1.00000000009999999271 PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 @@ -1574,7 +1574,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 4400 NULL -0.0000000000 +0 0 100 10 @@ -1593,7 +1593,7 @@ NULL 0.3 0.33 0.333 -1.0 +1 2 3.14 1.12 @@ -1601,15 +1601,15 @@ NULL 1.122 1.12 1.122 -124.00 +124 125.2 1255.49 3.14 3.14 -3.140 -1.0000000000 -1234567890.1234567890 -1234567890.1234567800 +3.14 +1 +1234567890.123456789 +1234567890.12345678 PREHOOK: query: -- avg EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value PREHOOK: type: QUERY @@ -1696,23 +1696,23 @@ POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DE POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --1234567890 -1234567890.123456789 -1234567890.123456789 -1234567890.1234567890 +-1234567890 -1234567890.123456789 -1234567890.123456789 -1234567890.123456789 -1255 -1255.49 -1255.49 -1255.49 -11 -1.122 -1.122 -1.122 -1 -1.12 -1.12 -2.24 -0 0.02538461538461538461538 0.02538461538462 0.3300000000 -1 1.0484 1.0484 5.2420000000 +0 0.02538461538461538461538 0.02538461538462 0.33 +1 1.0484 1.0484 5.242 2 2 2 4 3 3.14 3.14 9.42 -4 
3.14 3.14 3.140 +4 3.14 3.14 3.14 10 10 10 10 20 20 20 20 100 100 100 100 -124 124 124 124.00 +124 124 124 124 125 125.2 125.2 125.2 200 200 200 200 4400 -4400 -4400 -4400 -1234567890 1234567890.12345678 1234567890.12345678 1234567890.1234567800 +1234567890 1234567890.12345678 1234567890.12345678 1234567890.12345678 PREHOOK: query: -- negative EXPLAIN SELECT -key FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -1762,7 +1762,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 4400 NULL -0.0000000000 +0 0 -100 -10 @@ -1781,7 +1781,7 @@ NULL 0.3 0.33 0.333 --1.0 +-1 -2 -3.14 1.12 @@ -1789,15 +1789,15 @@ NULL 1.122 -1.12 -1.122 --124.00 +-124 -125.2 1255.49 -3.14 -3.14 --3.140 --1.0000000000 -1234567890.1234567890 --1234567890.1234567800 +-3.14 +-1 +1234567890.123456789 +-1234567890.12345678 PREHOOK: query: -- positive EXPLAIN SELECT +key FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -1829,7 +1829,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -4400 NULL -0.0000000000 +0 0 100 10 @@ -1848,7 +1848,7 @@ NULL -0.3 -0.33 -0.333 -1.0 +1 2 3.14 -1.12 @@ -1856,15 +1856,15 @@ NULL -1.122 1.12 1.122 -124.00 +124 125.2 -1255.49 3.14 3.14 -3.140 -1.0000000000 --1234567890.1234567890 -1234567890.1234567800 +3.14 +1 +-1234567890.123456789 +1234567890.12345678 PREHOOK: query: -- ceiling EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -2082,42 +2082,42 @@ POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --4400.00 +-4400 NULL -0.00 -0.00 -100.00 -10.00 -1.00 -0.10 +0 +0 +100 +10 +1 +0.1 0.01 -200.00 -20.00 -2.00 -0.00 -0.20 +200 +20 +2 +0 +0.2 0.02 -0.30 +0.3 0.33 0.33 --0.30 +-0.3 -0.33 -0.33 -1.00 -2.00 +1 +2 3.14 -1.12 -1.12 -1.12 1.12 1.12 -124.00 -125.20 +124 +125.2 -1255.49 3.14 3.14 3.14 -1.00 +1 -1234567890.12 1234567890.12 PREHOOK: query: -- power @@ -2257,38 +2257,38 @@ NULL NULL 1 1 -0.0 -0.00 -0.000 +0 +0 +0 1 1 0 NULL -0.0 -0.00 -0.10 -0.010 -0.0010 -0.10 -0.010 -0.0010 -0.0 0 -1.00 +0 +0.1 +0.01 +0.001 +0.1 +0.01 +0.001 +0 +0 +1 -0.12 -0.12 -0.122 0.44 0.439 -1.00 -1.0 +1 +1 -626.745 -1.00 -1.00 -1.000 -0.0000000000 +1 +1 +1 +0 -617283944.0617283945 -1.0000000000 +1 PREHOOK: query: -- stddev, var EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value PREHOOK: type: QUERY @@ -2608,7 +2608,7 @@ POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --1234567890.1234567890 +-1234567890.123456789 PREHOOK: query: -- max EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -2679,7 +2679,7 @@ POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -1234567890.1234567800 +1234567890.12345678 PREHOOK: query: -- count EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out index 14acfc5..d38faab 100644 --- a/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_reduce_groupby_decimal.q.out @@ -132,7 +132,7 @@ POSTHOOK: Input: default@decimal_test -1066226047 -9439.0 -5637.8891891892 -6752.515384615385 -5637.8891891892 -1065117869 2538.0 1515.9405405405 1815.646153846154 1515.9405405405 
-1064949302 6454.0 3854.9567567568 4617.092307692308 3854.9567567568 --1063498122 -11480.0 -6856.9729729730 -8212.615384615387 -6856.9729729730 +-1063498122 -11480.0 -6856.972972973 -8212.615384615387 -6856.972972973 -1062973443 10541.0 6296.1108108108 7540.869230769231 6296.1108108108 -1061614989 -4234.0 -2528.9567567568 -3028.938461538462 -2528.9567567568 -1061057428 -1085.0 -648.0675675676 -776.1923076923077 -648.0675675676 @@ -140,14 +140,14 @@ POSTHOOK: Input: default@decimal_test -1059338191 7322.0 4373.4108108108 5238.046153846154 4373.4108108108 -1059047258 12452.0 7437.5459459459 8907.969230769231 7437.5459459459 -1056684111 13991.0 8356.7864864865 10008.946153846155 8356.7864864865 --1055945837 13690.0 8177.0 9793.615384615387 8177.0 +-1055945837 13690.0 8177 9793.615384615387 8177 -1055669248 2570.0 1535.0540540541 1838.538461538462 1535.0540540541 -1055316250 -14990.0 -8953.4864864865 -10723.615384615385 -8953.4864864865 -1053385587 14504.0 8663.2 10375.938461538462 8663.2 -1053238077 -3704.0 -2212.3891891892 -2649.784615384616 -2212.3891891892 -1052745800 -12404.0 -7408.8756756757 -8873.630769230771 -7408.8756756757 -1052322972 -7433.0 -4439.7108108108 -5317.453846153847 -4439.7108108108 --1050684541 -8261.0 -4934.2729729730 -5909.792307692308 -4934.2729729730 +-1050684541 -8261.0 -4934.272972973 -5909.792307692308 -4934.272972973 -1050657303 -6999.0 -4180.4837837838 -5006.976923076923 -4180.4837837838 -1050165799 8634.0 5157.0648648649 6176.63076923077 5157.0648648649 -1048934049 -524.0 -312.9837837838 -374.86153846153854 -312.9837837838 @@ -158,12 +158,12 @@ POSTHOOK: Input: default@decimal_test -1045087657 -5865.0 -3503.1486486486 -4195.7307692307695 -3503.1486486486 -1044207190 5381.0 3214.0567567568 3849.4846153846156 3214.0567567568 -1044093617 -3422.0 -2043.9513513514 -2448.046153846154 -2043.9513513514 --1043573508 16216.0 9685.7729729730 11600.676923076924 9685.7729729730 +-1043573508 16216.0 9685.772972973 11600.676923076924 9685.772972973 -1043132597 12302.0 7347.9513513514 8800.66153846154 7347.9513513514 -1043082182 9180.0 5483.1891891892 6567.2307692307695 5483.1891891892 --1042805968 5133.0 3065.9270270270 3672.0692307692307 3065.9270270270 +-1042805968 5133.0 3065.927027027 3672.0692307692307 3065.927027027 -1042712895 9296.0 5552.4756756757 6650.215384615385 5552.4756756757 --1042396242 9583.0 5723.9000000000 6855.53076923077 5723.9000000000 +-1042396242 9583.0 5723.9 6855.53076923077 5723.9 -1041734429 -836.0 -499.3405405405 -598.0615384615385 -499.3405405405 -1041391389 -12970.0 -7746.9459459459 -9278.538461538463 -7746.9459459459 -1041252354 756.0 451.5567567568 540.8307692307692 451.5567567568 diff --git a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out index 2cb21a4..0aeb53a 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out @@ -264,7 +264,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -284,7 +284,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string 
p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -554,7 +554,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -574,7 +574,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -621,7 +621,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -641,7 +641,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -871,7 +871,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -891,7 +891,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -1128,7 +1128,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1148,7 +1148,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -1416,7 +1416,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1436,7 +1436,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -1714,7 +1714,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1734,7 +1734,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -1981,7 +1981,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2001,7 +2001,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -2045,7 +2045,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2065,7 +2065,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked 
pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -2295,7 +2295,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2315,7 +2315,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -2359,7 +2359,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2379,7 +2379,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -2617,7 +2617,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2637,7 +2637,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -2894,7 +2894,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2914,7 +2914,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc 
@@ -3173,7 +3173,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -3193,7 +3193,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -3462,7 +3462,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -3482,7 +3482,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -3788,7 +3788,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -3808,7 +3808,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -4177,7 +4177,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -4197,7 +4197,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -4241,7 +4241,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string 
p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -4261,7 +4261,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -4501,7 +4501,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -4521,7 +4521,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -4783,7 +4783,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -4803,7 +4803,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -5248,7 +5248,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -5268,7 +5268,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -5756,7 +5756,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double 
p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -5776,7 +5776,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -6113,7 +6113,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -6133,7 +6133,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -6466,7 +6466,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -6486,7 +6486,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -6816,7 +6816,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -6836,7 +6836,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -7214,7 +7214,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -7234,7 +7234,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -7563,7 +7563,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -7583,7 +7583,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc diff --git a/ql/src/test/results/clientpositive/timestamp_literal.q.out b/ql/src/test/results/clientpositive/timestamp_literal.q.out new file mode 100644 index 0000000..13ffaf1 --- /dev/null +++ b/ql/src/test/results/clientpositive/timestamp_literal.q.out @@ -0,0 +1,99 @@ +PREHOOK: query: explain +select timestamp '2011-01-01 01:01:01' +PREHOOK: type: QUERY +POSTHOOK: query: explain +select timestamp '2011-01-01 01:01:01' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Select Operator + expressions: 2011-01-01 01:01:01.0 (type: timestamp) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + ListSink + +PREHOOK: query: select timestamp '2011-01-01 01:01:01' +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select timestamp '2011-01-01 01:01:01' +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +2011-01-01 01:01:01 +PREHOOK: query: explain +select '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100' +PREHOOK: type: QUERY +POSTHOOK: query: explain +select '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Select Operator + expressions: true (type: boolean) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + ListSink + +PREHOOK: query: select '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100' +PREHOOK: type: QUERY +PREHOOK: Input: 
_dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100' +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +true +PREHOOK: query: explain +select 1 where timestamp '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100' +PREHOOK: type: QUERY +POSTHOOK: query: explain +select 1 where timestamp '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + Select Operator + expressions: 1 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 1 Basic stats: PARTIAL Column stats: COMPLETE + ListSink + +PREHOOK: query: select 1 where timestamp '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100' +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select 1 where timestamp '2011-01-01 01:01:01.101' <> timestamp '2011-01-01 01:01:01.100' +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 diff --git a/ql/src/test/results/clientpositive/udf_case.q.out b/ql/src/test/results/clientpositive/udf_case.q.out index 29905ab..ed0aac0 100644 --- a/ql/src/test/results/clientpositive/udf_case.q.out +++ b/ql/src/test/results/clientpositive/udf_case.q.out @@ -208,4 +208,4 @@ FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -123.0 123.0 abcd +123 123.0 abcd diff --git a/ql/src/test/results/clientpositive/udf_when.q.out b/ql/src/test/results/clientpositive/udf_when.q.out index 696d7b0..52f15b3 100644 --- a/ql/src/test/results/clientpositive/udf_when.q.out +++ b/ql/src/test/results/clientpositive/udf_when.q.out @@ -191,4 +191,4 @@ FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -123.0 123.0 abcd +123 123.0 abcd diff --git a/ql/src/test/results/clientpositive/update_all_types.q.out b/ql/src/test/results/clientpositive/update_all_types.q.out index f1353d0..eba4dde 100644 --- a/ql/src/test/results/clientpositive/update_all_types.q.out +++ b/ql/src/test/results/clientpositive/update_all_types.q.out @@ -87,16 +87,16 @@ POSTHOOK: query: select * from acid_uat order by i POSTHOOK: type: QUERY POSTHOOK: Input: default@acid_uat #### A masked pattern was here #### -11 NULL -1073279343 -1595604468 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true +11 NULL -1073279343 -1595604468 11.0 NULL 11 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true NULL -7382 -1073051226 -1887561756 NULL -7382.0 NULL NULL 1970-01-01 A34p7oRr2WvUJNf A34p7oRr2WvUJNf 4hA4KQj2vD3fI6gX82220d false -11 NULL -1072910839 2048385991 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false +11 NULL -1072910839 2048385991 11.0 NULL 11 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false NULL 8373 -1072081801 1864027286 NULL 8373.0 NULL NULL 1970-01-01 dPkN74F7 dPkN74F7 4KWs6gw7lv2WYd66P true NULL -5470 -1072076362 1864027286 NULL -5470.0 NULL NULL 1970-01-01 2uLyD28144vklju213J1mr 2uLyD28144vklju213J1mr 
4KWs6gw7lv2WYd66P true --51 NULL -1071480828 -1401575336 -51.0 NULL -51.0 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true -8 NULL -1071363017 1349676361 8.0 NULL 8.0 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true +-51 NULL -1071480828 -1401575336 -51.0 NULL -51 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true +8 NULL -1071363017 1349676361 8.0 NULL 8 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true NULL -741 -1070883071 -1645852809 NULL -741.0 NULL NULL 1970-01-01 0ruyd6Y50JpdGRf6HqD 0ruyd6Y50JpdGRf6HqD xH7445Rals48VOulSyR5F false NULL -947 -1070551679 1864027286 NULL -947.0 NULL NULL 1970-01-01 iUR3Q iUR3Q 4KWs6gw7lv2WYd66P false -11 NULL -1069736047 -453772520 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true +11 NULL -1069736047 -453772520 11.0 NULL 11 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true PREHOOK: query: update acid_uat set ti = 1, si = 2, @@ -141,15 +141,15 @@ POSTHOOK: query: select * from acid_uat order by i POSTHOOK: type: QUERY POSTHOOK: Input: default@acid_uat #### A masked pattern was here #### -11 NULL -1073279343 -1595604468 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true +11 NULL -1073279343 -1595604468 11.0 NULL 11 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true NULL -7382 -1073051226 -1887561756 NULL -7382.0 NULL NULL 1970-01-01 A34p7oRr2WvUJNf A34p7oRr2WvUJNf 4hA4KQj2vD3fI6gX82220d false -11 NULL -1072910839 2048385991 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false +11 NULL -1072910839 2048385991 11.0 NULL 11 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false NULL 8373 -1072081801 1864027286 NULL 8373.0 NULL NULL 1970-01-01 dPkN74F7 dPkN74F7 4KWs6gw7lv2WYd66P true NULL -5470 -1072076362 1864027286 NULL -5470.0 NULL NULL 1970-01-01 2uLyD28144vklju213J1mr 2uLyD28144vklju213J1mr 4KWs6gw7lv2WYd66P true --51 NULL -1071480828 -1401575336 -51.0 NULL -51.0 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true -8 NULL -1071363017 1349676361 8.0 NULL 8.0 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true +-51 NULL -1071480828 -1401575336 -51.0 NULL -51 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true +8 NULL -1071363017 1349676361 8.0 NULL 8 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true NULL -947 -1070551679 1864027286 NULL -947.0 NULL NULL 1970-01-01 iUR3Q iUR3Q 4KWs6gw7lv2WYd66P false -11 NULL -1069736047 -453772520 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true +11 NULL -1069736047 -453772520 11.0 NULL 11 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true 1 2 3 4 3.14 6.28 5.99 NULL 2014-09-01 its a beautiful day in the neighbhorhood a beautiful day for a neighbor wont you be mine true PREHOOK: query: update acid_uat set ti = ti * 2, @@ -175,13 +175,13 @@ POSTHOOK: query: select * from acid_uat order by i POSTHOOK: type: QUERY POSTHOOK: Input: default@acid_uat #### A masked pattern was here #### -11 NULL -1073279343 -1595604468 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL oj1YrV5Wa oj1YrV5Wa P76636jJ6qM17d7DIy true +11 NULL -1073279343 -1595604468 11.0 NULL 11 1969-12-31 16:00:02.351 NULL oj1YrV5Wa 
oj1YrV5Wa P76636jJ6qM17d7DIy true NULL -7382 -1073051226 -1887561756 NULL -7382.0 NULL NULL 1970-01-01 A34p7oRr2WvUJNf A34p7oRr2WvUJNf 4hA4KQj2vD3fI6gX82220d false -11 NULL -1072910839 2048385991 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false +11 NULL -1072910839 2048385991 11.0 NULL 11 1969-12-31 16:00:02.351 NULL 0iqrc5 0iqrc5 KbaDXiN85adbHRx58v false NULL 8373 -1072081801 1864027286 NULL 8373.0 NULL NULL 1970-01-01 dPkN74F7 dPkN74F7 4KWs6gw7lv2WYd66P true NULL -5470 -1072076362 1864027286 NULL -5470.0 NULL NULL 1970-01-01 2uLyD28144vklju213J1mr 2uLyD28144vklju213J1mr 4KWs6gw7lv2WYd66P true --102 -51 -1071480828 -1401575336 -51.0 -51.0 -51.0 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true -8 NULL -1071363017 1349676361 8.0 NULL 8.0 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true +-102 -51 -1071480828 -1401575336 -51.0 -51.0 -51 1969-12-31 16:00:08.451 NULL aw724t8c5558x2xneC624 aw724t8c5558x2xneC624 4uE7l74tESBiKfu7c8wM7GA true +8 NULL -1071363017 1349676361 8.0 NULL 8 1969-12-31 16:00:15.892 NULL Anj0oF Anj0oF IwE1G7Qb0B1NEfV030g true NULL -947 -1070551679 1864027286 NULL -947.0 NULL NULL 1970-01-01 iUR3Q iUR3Q 4KWs6gw7lv2WYd66P false -11 NULL -1069736047 -453772520 11.0 NULL 11.0 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true +11 NULL -1069736047 -453772520 11.0 NULL 11 1969-12-31 16:00:02.351 NULL k17Am8uPHWk02cEf1jet k17Am8uPHWk02cEf1jet qrXLLNX1 true 1 2 3 4 3.14 6.28 5.99 NULL 2014-09-01 its a beautiful day in the neighbhorhood a beautiful day for a neighbor wont you be mine true diff --git a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out index c0fe295..3a3e2de 100644 --- a/ql/src/test/results/clientpositive/vector_aggregate_9.q.out +++ b/ql/src/test/results/clientpositive/vector_aggregate_9.q.out @@ -164,4 +164,4 @@ POSTHOOK: query: select min(dc), max(dc), sum(dc), avg(dc) from vectortab2korc POSTHOOK: type: QUERY POSTHOOK: Input: default@vectortab2korc #### A masked pattern was here #### --4997414117561.546875 4994550248722.298828 -10252745435816.024410 -5399023399.587163986308583465 +-4997414117561.546875 4994550248722.298828 -10252745435816.02441 -5399023399.587163986308583465 diff --git a/ql/src/test/results/clientpositive/vector_between_in.q.out b/ql/src/test/results/clientpositive/vector_between_in.q.out index e21dd85..22c40fe 100644 --- a/ql/src/test/results/clientpositive/vector_between_in.q.out +++ b/ql/src/test/results/clientpositive/vector_between_in.q.out @@ -643,19 +643,19 @@ POSTHOOK: Input: default@decimal_date_test -18.5162162162 -17.3216216216 -16.7243243243 --16.1270270270 +-16.127027027 -15.5297297297 -10.7513513514 -9.5567567568 -8.3621621622 --5.9729729730 +-5.972972973 -3.5837837838 4.1810810811 4.7783783784 4.7783783784 5.3756756757 -5.9729729730 -5.9729729730 +5.972972973 +5.972972973 11.3486486486 11.3486486486 11.9459459459 diff --git a/ql/src/test/results/clientpositive/vector_data_types.q.out b/ql/src/test/results/clientpositive/vector_data_types.q.out index 07d58ed..65aa5e9 100644 --- a/ql/src/test/results/clientpositive/vector_data_types.q.out +++ b/ql/src/test/results/clientpositive/vector_data_types.q.out @@ -151,7 +151,7 @@ POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc OR POSTHOOK: type: QUERY POSTHOOK: Input: default@over1korc #### A masked pattern was here #### -108 301 65536 4294967357 90.05 
17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.70 undecided +108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.7 undecided 118 497 65536 4294967381 50.32 12.72 false david nixon 2013-03-01 09:11:58.703285 83.48 values clariffication 18 280 65536 4294967320 32.92 45.94 false holly white 2013-03-01 09:11:58.703086 58.86 topology 69 489 65536 4294967404 33.52 17.99 false oscar ichabod 2013-03-01 09:11:58.703247 32.68 topology @@ -237,7 +237,7 @@ POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc OR POSTHOOK: type: QUERY POSTHOOK: Input: default@over1korc #### A masked pattern was here #### -108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.70 undecided +108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.7 undecided 118 497 65536 4294967381 50.32 12.72 false david nixon 2013-03-01 09:11:58.703285 83.48 values clariffication 18 280 65536 4294967320 32.92 45.94 false holly white 2013-03-01 09:11:58.703086 58.86 topology 69 489 65536 4294967404 33.52 17.99 false oscar ichabod 2013-03-01 09:11:58.703247 32.68 topology diff --git a/ql/src/test/results/clientpositive/vector_decimal_2.q.out b/ql/src/test/results/clientpositive/vector_decimal_2.q.out index d273810..fceb027 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_2.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_2.q.out @@ -1423,7 +1423,7 @@ POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) as c from POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_2 #### A masked pattern was here #### -1.0 +1 PREHOOK: query: explain select cast('0.99999999999999999999' as decimal(20,20)) as c from decimal_2 order by c PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/vector_decimal_3.q.out b/ql/src/test/results/clientpositive/vector_decimal_3.q.out index e982e1b..75f872e 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_3.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_3.q.out @@ -47,7 +47,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -56,7 +56,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.000000000000000000 0 +0 0 0 0 0 0 0.01 0 @@ -67,8 +67,8 @@ NULL 0 0.33 0 0.333 0 1 1 -1.0 1 -1.000000000000000000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 @@ -76,14 +76,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -92,14 +92,14 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 200 200 125.2 125 -124.00 124 +124 124 100 100 20 20 10 10 -3.140 4 +3.14 4 3.14 3 3.14 3 3.14 3 @@ -107,8 +107,8 @@ POSTHOOK: Input: default@decimal_3 2 2 1.122 1 1.12 1 -1.000000000000000000 1 -1.0 1 +1 1 +1 1 1 1 0.333 0 0.33 0 @@ -119,7 +119,7 @@ POSTHOOK: Input: default@decimal_3 0.01 0 0 0 0 0 -0.000000000000000000 0 +0 0 -0.3 0 -0.33 0 -0.333 0 @@ -128,7 +128,7 @@ POSTHOOK: Input: default@decimal_3 -1.122 -11 -1255.49 -1255 -4400 4400 --1234567890.1234567890 -1234567890 
+-1234567890.123456789 -1234567890 NULL 0 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value PREHOOK: type: QUERY @@ -139,7 +139,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -148,7 +148,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.000000000000000000 0 +0 0 0 0 0 0 0.01 0 @@ -159,8 +159,8 @@ NULL 0 0.33 0 0.333 0 1 1 -1.0 1 -1.000000000000000000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 @@ -168,14 +168,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -185,7 +185,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL --1234567890.1234567890 +-1234567890.123456789 -4400 -1255.49 -1.122 @@ -193,7 +193,7 @@ NULL -0.333 -0.33 -0.3 -0.000000000000000000 +0 0.01 0.02 0.1 @@ -209,10 +209,10 @@ NULL 10 20 100 -124.00 +124 125.2 200 -1234567890.1234567800 +1234567890.12345678 PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -222,7 +222,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -230,7 +230,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.000000000000000000 0 +0 0 0.01 0 0.02 0 0.1 0 @@ -246,10 +246,10 @@ NULL 0 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -258,23 +258,23 @@ POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY v POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### --1234567890 -1234567890.1234567890 +-1234567890 -1234567890.123456789 -1255 -1255.49 -11 -1.122 -1 -2.24 -0 0.330000000000000000 -1 5.242000000000000000 +0 0.33 +1 5.242 2 4 3 9.42 -4 3.140 +4 3.14 10 10 20 20 100 100 -124 124.00 +124 124 125 125.2 200 200 4400 -4400 -1234567890 1234567890.1234567800 +1234567890 1234567890.12345678 PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -283,7 +283,7 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### --1234567890.1234567890 -1234567890 -1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -1234567890.123456789 -1234567890 -4400 4400 -4400 4400 -1255.49 -1255 -1255.49 -1255 -1.122 -11 -1.122 -11 @@ -294,7 +294,11 @@ POSTHOOK: Input: default@decimal_3 -0.333 0 -0.333 0 -0.33 0 -0.33 0 -0.3 0 -0.3 0 -0.000000000000000000 0 0.000000000000000000 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 @@ -307,8 +311,14 @@ POSTHOOK: Input: default@decimal_3 0.33 0 0.33 0 0.333 0 0.333 0 1 1 1 1 -1.0 1 1.0 1 -1.000000000000000000 1 1.000000000000000000 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 +1 1 1 1 1.12 1 1.12 
1 1.122 1 1.122 1 2 2 2 2 @@ -324,14 +334,20 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 3.14 3 3.14 3 3.14 3 -3.140 4 3.140 4 +3.14 3 3.14 4 +3.14 3 3.14 4 +3.14 3 3.14 4 +3.14 4 3.14 3 +3.14 4 3.14 3 +3.14 4 3.14 3 +3.14 4 3.14 4 10 10 10 10 20 20 20 20 100 100 100 100 -124.00 124 124.00 124 +124 124 124 124 125.2 125 125.2 125 200 200 200 200 -1234567890.1234567800 1234567890 1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 1234567890.12345678 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -343,7 +359,7 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -355,7 +371,7 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 PREHOOK: query: DROP TABLE DECIMAL_3_txt PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_3_txt diff --git a/ql/src/test/results/clientpositive/vector_decimal_4.q.out b/ql/src/test/results/clientpositive/vector_decimal_4.q.out index 483ae1f..613f5a8 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_4.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_4.q.out @@ -57,7 +57,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_1 #### A masked pattern was here #### NULL 0 --1234567890.1234567890 -1234567890 +-1234567890.123456789 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -66,7 +66,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0.0000000000000000000000000 0 +0 0 0 0 0 0 0.01 0 @@ -78,7 +78,7 @@ NULL 0 0.333 0 0.9999999999999999999999999 1 1 1 -1.0 1 +1 1 1.12 1 1.122 1 2 2 @@ -86,14 +86,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 20 20 100 100 -124.00 124 +124 124 125.2 125 200 200 -1234567890.1234567800 1234567890 +1234567890.12345678 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_4_2 @@ -103,7 +103,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_2 #### A masked pattern was here #### NULL NULL --1234567890.1234567890 -3703703670.3703703670 +-1234567890.123456789 -3703703670.370370367 -4400 -13200 -1255.49 -3766.47 -1.122 -3.366 @@ -112,7 +112,7 @@ NULL NULL -0.333 -0.999 -0.33 -0.99 -0.3 -0.9 -0.0000000000000000000000000 0.0000000000000000000000000 +0 0 0 0 0 0 0.01 0.03 @@ -124,7 +124,7 @@ NULL NULL 0.333 0.999 0.9999999999999999999999999 2.9999999999999999999999997 1 3 -1.0 3.0 +1 3 1.12 3.36 1.122 3.366 2 6 @@ -132,14 +132,14 @@ NULL NULL 3.14 9.42 3.14 9.42 3.14 9.42 -3.140 9.420 +3.14 9.42 10 30 20 60 100 300 -124.00 372.00 +124 372 125.2 375.6 200 600 -1234567890.1234567800 3703703670.3703703400 +1234567890.12345678 3703703670.37037034 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_4_2 @@ -149,7 +149,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_2 #### A masked pattern was here #### NULL NULL --1234567890.1234567890 -3703703670.3703703670 +-1234567890.123456789 -3703703670.370370367 -4400 -13200 -1255.49 -3766.47 -1.122 -3.366 @@ -158,7 +158,7 @@ NULL NULL -0.333 -0.999 -0.33 -0.99 -0.3 -0.9 -0.0000000000000000000000000 0.0000000000000000000000000 +0 0 0 0 0 0 0.01 0.03 @@ -170,7 +170,7 @@ NULL NULL 0.333 0.999 0.9999999999999999999999999 2.9999999999999999999999997 1 3 -1.0 3.0 +1 3 1.12 3.36 1.122 3.366 2 6 @@ -178,14 +178,14 @@ NULL NULL 3.14 
9.42 3.14 9.42 3.14 9.42 -3.140 9.420 +3.14 9.42 10 30 20 60 100 300 -124.00 372.00 +124 372 125.2 375.6 200 600 -1234567890.1234567800 3703703670.3703703400 +1234567890.12345678 3703703670.37037034 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_4_2 @@ -195,7 +195,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_2 #### A masked pattern was here #### NULL NULL --1234567890.1234567890 -3703703670.3703703670 +-1234567890.123456789 -3703703670.370370367 -4400 -13200 -1255.49 -3766.47 -1.122 -3.366 @@ -204,7 +204,7 @@ NULL NULL -0.333 -0.999 -0.33 -0.99 -0.3 -0.9 -0.0000000000000000000000000 0.0000000000000000000000000 +0 0 0 0 0 0 0.01 0.03 @@ -216,7 +216,7 @@ NULL NULL 0.333 0.999 0.9999999999999999999999999 2.9999999999999999999999997 1 3 -1.0 3.0 +1 3 1.12 3.36 1.122 3.366 2 6 @@ -224,14 +224,14 @@ NULL NULL 3.14 9.42 3.14 9.42 3.14 9.42 -3.140 9.420 +3.14 9.42 10 30 20 60 100 300 -124.00 372.00 +124 372 125.2 375.6 200 600 -1234567890.1234567800 3703703670.3703703400 +1234567890.12345678 3703703670.37037034 PREHOOK: query: DROP TABLE DECIMAL_4_1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_4_1 diff --git a/ql/src/test/results/clientpositive/vector_decimal_5.q.out b/ql/src/test/results/clientpositive/vector_decimal_5.q.out index 01b5f42..34c3351 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_5.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_5.q.out @@ -67,7 +67,7 @@ NULL -0.333 -0.33 -0.3 -0.00000 +0 0 0 0.01 @@ -78,8 +78,8 @@ NULL 0.33 0.333 1 -1.0 -1.00000 +1 +1 1.12 1.122 2 @@ -87,11 +87,11 @@ NULL 3.14 3.14 3.14 -3.140 +3.14 10 20 100 -124.00 +124 125.2 200 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key @@ -110,7 +110,7 @@ NULL -0.333 -0.33 -0.3 -0.00000 +0 0.01 0.02 0.1 @@ -126,7 +126,7 @@ NULL 10 20 100 -124.00 +124 125.2 200 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5 @@ -185,7 +185,7 @@ POSTHOOK: Input: default@decimal_5 #### A masked pattern was here #### NULL NULL -0.000 +0 0 100 10 @@ -204,7 +204,7 @@ NULL -0.3 -0.33 -0.333 -1.0 +1 2 3.14 -1.12 @@ -212,13 +212,13 @@ NULL -1.122 1.12 1.122 -124.00 +124 125.2 NULL 3.14 3.14 -3.140 -1.000 +3.14 +1 NULL NULL PREHOOK: query: DROP TABLE DECIMAL_5_txt diff --git a/ql/src/test/results/clientpositive/vector_decimal_6.q.out b/ql/src/test/results/clientpositive/vector_decimal_6.q.out index 7ecd500..9cdd7fc 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_6.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_6.q.out @@ -125,20 +125,20 @@ NULL 1234567890 -1.12 -1 -0.333 0 -0.3 0 -0.00000 0 +0 0 0 0 0.333 0 -1.0 1 -1.00000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 10.73433 5 -124.00 124 +124 124 125.2 125 23232.23435 2 PREHOOK: query: SELECT * FROM DECIMAL_6_2 ORDER BY key, value @@ -157,20 +157,20 @@ NULL 0 -1.12 -1 -0.333 0 -0.3 0 -0.0000 0 +0 0 0 0 0.333 0 -1.0 1 -1.0000 1 +1 1 +1 1 1.12 1 1.122 1 2 2 3.14 3 3.14 3 -3.140 4 +3.14 4 10 10 10.7343 5 -124.00 124 +124 124 125.2 125 23232.2344 2 2389432.2375 3 @@ -213,16 +213,16 @@ NULL -0.333 -0.3 -0.3 -0.00000 -0.0000 +0 +0 0 0 0.333 0.333 -1.0 -1.0 -1.0000 -1.00000 +1 +1 +1 +1 1.12 1.12 1.122 @@ -233,14 +233,14 @@ NULL 3.14 3.14 3.14 -3.140 -3.140 +3.14 +3.14 10 10 10.7343 10.73433 -124.00 -124.00 +124 +124 125.2 125.2 23232.23435 diff --git a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out index 
7f2cd53..586f29b 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out @@ -108,9 +108,9 @@ POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 3072 11160.715384615385 -5147.907692307693 6010604.3076923073536 -3728 6 5831542.269248378 -3367.6517567568 5817556.0411483778 6 6984454.211097692 -4033.445769230769 6967702.8672438458471 --563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 +-563 2 -515.621072973 -3367.6517567568 -3883.2728297298 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 762 2 5831542.269248378 1531.2194054054 5833073.4886537834 2 6984454.211097692 1833.9456923076925 6986288.1567899996925 -6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 +6981 3 5831542.269248378 -515.621072973 5830511.027102432 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 1024 11697.969230769231 -11712.99230769231 -416182.64030769233089 528534767 1024 5831542.269248378 -9777.1594594595 11646372.8607481068 1024 6984454.211097692 -11710.130769230771 13948892.79980307629003 626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 1024 11645.746153846154 -11712.276923076923 12625.04759999997746 @@ -206,9 +206,9 @@ POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 1633.60810810806667 5695.483082135364 5696.4103077145055 3072 11160.715384615385 -5147.907692307693 6010604.3076923073536 1956.576923076922966667 6821.495748565159 6822.606289190924 -3728 6 5831542.269248378 -3367.6517567568 5817556.0411483778 969592.67352472963333 2174330.2092403853 2381859.406131774 6 6984454.211097692 -4033.445769230769 6967702.8672438458471 1161283.811207307641183333 2604201.2704476737 2852759.5602156054 --563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 -2325.50327307692295 1707.9424961538462 2415.395441814127 +-563 2 -515.621072973 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 -2325.50327307692295 1707.9424961538462 2415.395441814127 762 2 5831542.269248378 1531.2194054054 5833073.4886537834 2916536.7443268917 2915005.5249214866 4122440.3477364695 2 6984454.211097692 1833.9456923076925 6986288.1567899996925 3493144.07839499984625 3491310.1327026924 4937458.140118758 -6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 +6981 3 5831542.269248378 -515.621072973 5830511.027102432 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 -339.33992366976309 5708.9563478862 5711.745967572779 1024 11697.969230769231 -11712.99230769231 -416182.64030769233089 -406.428359675480791885 6837.632716002934 6840.973851172274 528534767 1024 
5831542.269248378 -9777.1594594595 11646372.8607481068 11373.41099682432305 257528.92988206653 257654.7686043977 1024 6984454.211097692 -11710.130769230771 13948892.79980307629003 13621.965624807691689482 308443.1074570801 308593.82484083984 626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 10.29399661106318 5742.09145323734 5744.897264034267 1024 11645.746153846154 -11712.276923076923 12625.04759999997746 12.329148046874977988 6877.318722794877 6880.679250101603 diff --git a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out index d489046..2053452 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out @@ -46,13 +46,13 @@ POSTHOOK: query: SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS D POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### --13326.0 528534767 true 1969-12-31 15:59:46.674 -13326.0 528534767 1 -13 --15813.0 528534767 true 1969-12-31 15:59:55.787 -15813.0 528534767 1 -4 --9566.0 528534767 true 1969-12-31 15:59:44.187 -9566.0 528534767 1 -16 -15007.0 528534767 true 1969-12-31 15:59:50.434 15007.0 528534767 1 -10 -7021.0 528534767 true 1969-12-31 16:00:15.007 7021.0 528534767 1 15 -4963.0 528534767 true 1969-12-31 16:00:07.021 4963.0 528534767 1 7 --7824.0 528534767 true 1969-12-31 16:00:04.963 -7824.0 528534767 1 5 --15431.0 528534767 true 1969-12-31 15:59:52.176 -15431.0 528534767 1 -8 --15549.0 528534767 true 1969-12-31 15:59:44.569 -15549.0 528534767 1 -15 -5780.0 528534767 true 1969-12-31 15:59:44.451 5780.0 528534767 1 -16 +-13326.0 528534767 true 1969-12-31 15:59:46.674 -13326 528534767 1 -13 +-15813.0 528534767 true 1969-12-31 15:59:55.787 -15813 528534767 1 -4 +-9566.0 528534767 true 1969-12-31 15:59:44.187 -9566 528534767 1 -16 +15007.0 528534767 true 1969-12-31 15:59:50.434 15007 528534767 1 -10 +7021.0 528534767 true 1969-12-31 16:00:15.007 7021 528534767 1 15 +4963.0 528534767 true 1969-12-31 16:00:07.021 4963 528534767 1 7 +-7824.0 528534767 true 1969-12-31 16:00:04.963 -7824 528534767 1 5 +-15431.0 528534767 true 1969-12-31 15:59:52.176 -15431 528534767 1 -8 +-15549.0 528534767 true 1969-12-31 15:59:44.569 -15549 528534767 1 -15 +5780.0 528534767 true 1969-12-31 15:59:44.451 5780 528534767 1 -16 diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out index 2c15b9a..9c3b076 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out @@ -57,12 +57,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_test #### A masked pattern was here #### 19699.417463617423 -12507.913305613346 0.8351496686995997 2.8303425077026896E7 3.6405405405 8963 10735 -17 8963 true 10735.776923076923 8963.641 10735.776923076923 1969-12-31 18:29:23.64054054 -9216.339708939685 -5851.806444906470 0.8353975893550668 6195112.1797296945 3.6243243243 4193 5022 -98 4193 true 5022.715384615385 4193.6245 5022.715384615385 1969-12-31 17:09:53.624324324 +9216.339708939685 -5851.80644490647 0.8353975893550668 6195112.1797296945 3.6243243243 4193 5022 -98 4193 true 5022.715384615385 4193.6245 5022.715384615385 1969-12-31 17:09:53.624324324 6514.8403326403464 -4136.5212058211928 0.8355907765708067 3095563.9418919063 4.3864864865 2964 3550 -34 2964 true 3550.4538461538464 2964.3865 
3550.4538461538464 1969-12-31 16:49:24.386486486 7587.301455301477 -4817.467775467754 0.8354976172734904 4198623.24324327 2.3783783784 3452 4134 38 3452 true 4134.923076923077 3452.3784 4134.923076923077 1969-12-31 16:57:32.378378378 -19197.9729729730 -12189.5270270270 0.835155361813429 2.6880848817567654E7 5.4729729730 8735 10462 -34 8735 true 10462.5 8735.473 10462.5 1969-12-31 18:25:35.472972973 +19197.972972973 -12189.527027027 0.835155361813429 2.6880848817567654E7 5.472972973 8735 10462 -34 8735 true 10462.5 8735.473 10462.5 1969-12-31 18:25:35.472972973 17098.9945945946 -10856.8054054054 0.8351828165813104 2.132423090270272E7 0.3945945946 7780 9318 102 7780 true 9318.6 7780.3945 9318.6 1969-12-31 18:09:40.394594594 12433.723076923077 -7894.646153846154 0.8352770361086894 1.12754688E7 7.6 5657 6776 120 5657 true 6776.123076923077 5657.6 6776.123076923077 1969-12-31 17:34:17.6 7247.316839916862 -4601.598544698524 0.8355241651897876 3830775.6932432684 7.6783783784 3297 3949 109 3297 true 3949.638461538462 3297.6785 3949.638461538462 1969-12-31 16:54:57.678378378 -14757.1700623700465 -9369.8914760914930 0.8352226654922171 1.5883214124324286E7 4.8162162162 6714 8042 106 6714 true 8042.3538461538465 6714.8164 8042.3538461538465 1969-12-31 17:51:54.816216216 +14757.1700623700465 -9369.891476091493 0.8352226654922171 1.5883214124324286E7 4.8162162162 6714 8042 106 6714 true 8042.3538461538465 6714.8164 8042.3538461538465 1969-12-31 17:51:54.816216216 10964.832016631993 -6961.991060291086 0.8353232978714221 8768719.779729689 9.2243243243 4989 5975 87 4989 true 5975.607692307693 4989.224 5975.607692307693 1969-12-31 17:23:09.224324324 diff --git a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out index 193e6f7..c88322d 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out @@ -113,26 +113,26 @@ POSTHOOK: query: SELECT l.cint, r.cint, l.cdecimal1, r.cdecimal2 POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_mapjoin #### A masked pattern was here #### -6981 6981 -515.6210729730 -617.5607769230769 -6981 6981 -515.6210729730 -617.5607769230769 -6981 6981 -515.6210729730 -617.5607769230769 -6981 6981 -515.6210729730 -617.5607769230769 -6981 6981 -515.6210729730 6984454.211097692 -6981 6981 -515.6210729730 6984454.211097692 -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL -6981 6981 -515.6210729730 NULL +6981 6981 -515.621072973 -617.5607769230769 +6981 6981 -515.621072973 -617.5607769230769 +6981 6981 -515.621072973 -617.5607769230769 +6981 6981 -515.621072973 -617.5607769230769 +6981 6981 -515.621072973 6984454.211097692 +6981 6981 -515.621072973 6984454.211097692 +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL +6981 6981 -515.621072973 NULL 
+6981 6981 -515.621072973 NULL 6981 6981 5831542.269248378 -617.5607769230769 6981 6981 5831542.269248378 -617.5607769230769 6981 6981 5831542.269248378 6984454.211097692 diff --git a/ql/src/test/results/clientpositive/vector_decimal_precision.q.out b/ql/src/test/results/clientpositive/vector_decimal_precision.q.out index 8789864..438ebc4 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_precision.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_precision.q.out @@ -99,13 +99,13 @@ NULL NULL NULL NULL -0.0000000000 -0.0000000000 -0.0000000000 -0.0000000000 0 -0.1234567890 -0.1234567890 +0 +0 +0 +0 +0.123456789 +0.123456789 1.2345678901 1.2345678901 1.2345678901 @@ -129,7 +129,7 @@ NULL 123456789.0123456 123456789.0123456789 1234567890.123456 -1234567890.1234567890 +1234567890.123456789 PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -182,13 +182,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 1.0000000000 -1.0000000000 -0.0000000000 1.0000000000 -1.0000000000 -0.0000000000 1.0000000000 -1.0000000000 -0.0000000000 1.0000000000 -1.0000000000 0 1 -1 -0.1234567890 1.1234567890 -0.8765432110 -0.1234567890 1.1234567890 -0.8765432110 +0 1 -1 +0 1 -1 +0 1 -1 +0 1 -1 +0.123456789 1.123456789 -0.876543211 +0.123456789 1.123456789 -0.876543211 1.2345678901 2.2345678901 0.2345678901 1.2345678901 2.2345678901 0.2345678901 1.2345678901 2.2345678901 0.2345678901 @@ -212,7 +212,7 @@ NULL NULL NULL 123456789.0123456 123456790.0123456 123456788.0123456 123456789.0123456789 123456790.0123456789 123456788.0123456789 1234567890.123456 1234567891.123456 1234567889.123456 -1234567890.1234567890 1234567891.1234567890 1234567889.1234567890 +1234567890.123456789 1234567891.123456789 1234567889.123456789 PREHOOK: query: SELECT dec, dec * 2, dec / 3 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -265,13 +265,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0.0000000000 0 -0.0000000000 0.0000000000 0 -0.0000000000 0.0000000000 0 -0.0000000000 0.0000000000 0 0 0 0 -0.1234567890 0.2469135780 0.041152263 -0.1234567890 0.2469135780 0.041152263 +0 0 0 +0 0 0 +0 0 0 +0 0 0 +0.123456789 0.246913578 0.041152263 +0.123456789 0.246913578 0.041152263 1.2345678901 2.4691357802 0.411522630033 1.2345678901 2.4691357802 0.411522630033 1.2345678901 2.4691357802 0.411522630033 @@ -281,9 +281,9 @@ NULL NULL NULL 123.4567890123 246.9135780246 41.1522630041 123.4567890123 246.9135780246 41.1522630041 123.4567890123 246.9135780246 41.1522630041 -1234.5678901235 2469.1357802470 411.522630041167 -1234.5678901235 2469.1357802470 411.522630041167 -1234.5678901235 2469.1357802470 411.522630041167 +1234.5678901235 2469.135780247 411.522630041167 +1234.5678901235 2469.135780247 411.522630041167 +1234.5678901235 2469.135780247 411.522630041167 12345.6789012346 24691.3578024692 4115.226300411533 12345.6789012346 24691.3578024692 4115.226300411533 123456.7890123456 246913.5780246912 41152.2630041152 @@ -295,7 +295,7 @@ NULL NULL NULL 123456789.0123456 246913578.0246912 41152263.0041152 123456789.0123456789 246913578.0246913578 41152263.0041152263 1234567890.123456 2469135780.246912 411522630.041152 -1234567890.1234567890 2469135780.2469135780 411522630.041152263 +1234567890.123456789 2469135780.246913578 411522630.041152263 PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: 
QUERY PREHOOK: Input: default@decimal_precision @@ -348,13 +348,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0 -0.0000000000 0 -0.0000000000 0 -0.0000000000 0 0 0 -0.1234567890 0.013717421 -0.1234567890 0.013717421 +0 0 +0 0 +0 0 +0 0 +0.123456789 0.013717421 +0.123456789 0.013717421 1.2345678901 0.137174210011 1.2345678901 0.137174210011 1.2345678901 0.137174210011 @@ -378,7 +378,7 @@ NULL NULL 123456789.0123456 13717421.001371733333 123456789.0123456789 13717421.0013717421 1234567890.123456 137174210.013717333333 -1234567890.1234567890 137174210.013717421 +1234567890.123456789 137174210.013717421 PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -431,13 +431,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0 -0.0000000000 0 -0.0000000000 0 -0.0000000000 0 0 0 -0.1234567890 0.0045724736667 -0.1234567890 0.0045724736667 +0 0 +0 0 +0 0 +0 0 +0.123456789 0.0045724736667 +0.123456789 0.0045724736667 1.2345678901 0.0457247366704 1.2345678901 0.0457247366704 1.2345678901 0.0457247366704 @@ -461,7 +461,7 @@ NULL NULL 123456789.0123456 4572473.6671239111111 123456789.0123456789 4572473.6671239140333 1234567890.123456 45724736.6712391111111 -1234567890.1234567890 45724736.6712391403333 +1234567890.123456789 45724736.6712391403333 PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -514,13 +514,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL -0.0000000000 0.00000000000000000000 -0.0000000000 0.00000000000000000000 -0.0000000000 0.00000000000000000000 -0.0000000000 0.00000000000000000000 0 0 -0.1234567890 0.01524157875019052100 -0.1234567890 0.01524157875019052100 +0 0 +0 0 +0 0 +0 0 +0.123456789 0.015241578750190521 +0.123456789 0.015241578750190521 1.2345678901 1.52415787526596567801 1.2345678901 1.52415787526596567801 1.2345678901 1.52415787526596567801 @@ -544,7 +544,7 @@ NULL NULL 123456789.0123456 15241578753238817.26870921383936 123456789.0123456789 15241578753238836.75019051998750190521 1234567890.123456 NULL -1234567890.1234567890 NULL +1234567890.123456789 NULL PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION diff --git a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out index 482f536..c4a015d 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out @@ -114,7 +114,7 @@ FROM decimal_tbl_1_orc ORDER BY d POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_tbl_1_orc #### A masked pattern was here #### -55555 55555 55555.0 55555.00 55555.000 55560 55600 56000 60000 100000 0 0 0 +55555 55555 55555 55555 55555 55560 55600 56000 60000 100000 0 0 0 PREHOOK: query: create table decimal_tbl_2_orc (pos decimal(38,18), neg decimal(38,18)) STORED AS ORC PREHOOK: type: CREATETABLE @@ -226,7 +226,7 @@ FROM decimal_tbl_2_orc ORDER BY p POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_tbl_2_orc #### A masked pattern was here #### -125 125 125.3 125.32 125.315 125.3150 130 100 0 0 -125 -125 -125.3 -125.32 -125.315 -125.3150 -130 -100 0 0 +125 125 125.3 125.32 125.315 125.315 130 100 0 0 -125 -125 -125.3 -125.32 -125.315 -125.315 -130 -100 0 0 PREHOOK: query: create table decimal_tbl_3_orc (dec decimal(38,18)) STORED 
AS ORC PREHOOK: type: CREATETABLE @@ -381,7 +381,7 @@ FROM decimal_tbl_3_orc ORDER BY d POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_tbl_3_orc #### A masked pattern was here #### -0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 3.1 3.14 3.142 3.1416 3.14159 3.141593 3.1415927 3.14159265 3.141592654 3.1415926536 3.14159265359 3.141592653590 3.1415926535898 3.1415926535898 3.14159265358979 3.141592653589793 3.1415926535897930 +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 3 3.1 3.14 3.142 3.1416 3.14159 3.141593 3.1415927 3.14159265 3.141592654 3.1415926536 3.14159265359 3.14159265359 3.1415926535898 3.1415926535898 3.14159265358979 3.141592653589793 3.141592653589793 PREHOOK: query: create table decimal_tbl_4_orc (pos decimal(38,18), neg decimal(38,18)) STORED AS ORC PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out b/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out index cb0b5a2..ffdb1c9 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_trailing.q.out @@ -76,13 +76,13 @@ POSTHOOK: Input: default@decimal_trailing 0 0 0 1 0 0 2 NULL NULL -3 1.0000 1.00000000 -4 10.0000 10.00000000 -5 100.0000 100.00000000 -6 1000.0000 1000.00000000 -7 10000.0000 10000.00000000 -8 100000.0000 100000.00000000 -9 NULL 1000000.00000000 +3 1 1 +4 10 10 +5 100 100 +6 1000 1000 +7 10000 10000 +8 100000 100000 +9 NULL 1000000 10 NULL NULL 11 NULL NULL 12 NULL NULL @@ -91,18 +91,18 @@ POSTHOOK: Input: default@decimal_trailing 15 NULL NULL 16 NULL NULL 17 NULL NULL -18 1.0000 1.00000000 -19 10.000 10.0000000 -20 100.00 100.000000 -21 1000.0 1000.00000 -22 100000 10000.0000 -23 0.0000 0.00000000 -24 0.000 0.0000000 -25 0.00 0.000000 -26 0.0 0.00000 -27 0 0.00000 -28 12313.2000 134134.31252500 -29 99999.9990 134134.31242553 +18 1 1 +19 10 10 +20 100 100 +21 1000 1000 +22 100000 10000 +23 0 0 +24 0 0 +25 0 0 +26 0 0 +27 0 0 +28 12313.2 134134.312525 +29 99999.999 134134.31242553 PREHOOK: query: DROP TABLE DECIMAL_TRAILING_txt PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_trailing_txt diff --git a/ql/src/test/results/clientpositive/vector_decimal_udf.q.out b/ql/src/test/results/clientpositive/vector_decimal_udf.q.out index 24a87c5..05cd5b3 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_udf.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_udf.q.out @@ -94,7 +94,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -8800 NULL -0.0000000000 +0 0 200 20 @@ -113,7 +113,7 @@ NULL -0.6 -0.66 -0.666 -2.0 +2 4 6.28 -2.24 @@ -121,15 +121,15 @@ NULL -2.244 2.24 2.244 -248.00 +248 250.4 -2510.98 6.28 6.28 -6.280 -2.0000000000 --2469135780.2469135780 -2469135780.2469135600 +6.28 +2 +-2469135780.246913578 +2469135780.24691356 PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF @@ -174,7 +174,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 0 NULL -0.0000000000 +0 0 200 20 @@ -193,7 +193,7 @@ NULL -0.3 -0.33 -0.333 -2.0 +2 4 6.14 -2.12 @@ -201,15 +201,15 @@ NULL -12.122 2.12 2.122 -248.00 +248 250.2 -2510.49 6.14 6.14 -7.140 -2.0000000000 --2469135780.1234567890 -2469135780.1234567800 +7.14 +2 +-2469135780.123456789 +2469135780.12345678 PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF @@ -415,42 +415,42 @@ POSTHOOK: Input: 
default@decimal_udf #### A masked pattern was here #### 0 NULL -0.0000000000 0 0 0 0 -0.0 -0.00 0 0 0 0 -0.0 -0.00 -0.0 -0.00 -0.000 -0.0 -0.00 -0.000 -0.0 0 -0.00 -0.00 -0.00 -0.000 -0.00 -0.000 -0.00 -0.0 -0.00 -0.00 -0.00 -0.000 -0.0000000000 -0.0000000000 -0.0000000000 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF @@ -495,7 +495,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -8800 NULL -0.0000000000 +0 0 0 0 @@ -514,7 +514,7 @@ NULL -0.3 -0.33 -0.333 -0.0 +0 0 0.14 -0.12 @@ -522,15 +522,15 @@ NULL 9.878 0.12 0.122 -0.00 +0 0.2 -0.49 0.14 0.14 --0.860 -0.0000000000 --0.1234567890 -0.1234567800 +-0.86 +0 +-0.123456789 +0.12345678 PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF @@ -736,7 +736,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 19360000 NULL -0.00000000000000000000 +0 0 10000 100 @@ -755,7 +755,7 @@ NULL 0.09 0.1089 0.110889 -1.00 +1 4 9.8596 1.2544 @@ -763,13 +763,13 @@ NULL 1.258884 1.2544 1.258884 -15376.0000 +15376 15675.04 1576255.1401 9.8596 9.8596 -9.859600 -1.00000000000000000000 +9.8596 +1 NULL NULL PREHOOK: query: EXPLAIN SELECT key, value FROM DECIMAL_UDF where key * value > 0 @@ -823,7 +823,7 @@ POSTHOOK: Input: default@decimal_udf 200 200 20 20 2 2 -1.0 1 +1 1 2 2 3.14 3 -1.12 -1 @@ -831,15 +831,15 @@ POSTHOOK: Input: default@decimal_udf -1.122 -11 1.12 1 1.122 1 -124.00 124 +124 124 125.2 125 -1255.49 -1255 3.14 3 3.14 3 -3.140 4 -1.0000000000 1 --1234567890.1234567890 -1234567890 -1234567890.1234567800 1234567890 +3.14 4 +1 1 +-1234567890.123456789 -1234567890 +1234567890.12345678 1234567890 PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF @@ -884,26 +884,26 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -19360000 NULL -0.0000000000 +0 0 10000 100 1 -0.0 -0.00 +0 +0 40000 400 4 0 -0.0 -0.00 -0.0 -0.00 -0.000 -0.0 -0.00 -0.000 -1.0 +0 +0 +0 +0 +0 +0 +0 +0 +1 4 9.42 1.12 @@ -911,15 +911,15 @@ NULL 12.342 1.12 1.122 -15376.00 -15650.0 +15376 +15650 1575639.95 9.42 9.42 -12.560 -1.0000000000 -1524157875171467887.5019052100 -1524157875171467876.3907942000 +12.56 +1 +1524157875171467887.50190521 +1524157875171467876.3907942 PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF @@ -1319,7 +1319,7 @@ POSTHOOK: Input: default@decimal_udf 0.785 1 1.0000000001 -1.000000000099999992710 +1.00000000009999999271 PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 @@ -1514,7 +1514,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 4400 NULL -0.0000000000 +0 0 100 10 @@ -1533,7 +1533,7 @@ NULL 0.3 0.33 0.333 -1.0 +1 2 3.14 1.12 @@ -1541,15 +1541,15 @@ NULL 1.122 1.12 1.122 -124.00 +124 125.2 1255.49 3.14 3.14 -3.140 -1.0000000000 -1234567890.1234567890 -1234567890.1234567800 +3.14 +1 +1234567890.123456789 +1234567890.12345678 PREHOOK: query: -- avg EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF 
GROUP BY value ORDER BY value PREHOOK: type: QUERY @@ -1639,23 +1639,23 @@ POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DE POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --1234567890 -1234567890.123456789 -1234567890.123456789 -1234567890.1234567890 +-1234567890 -1234567890.123456789 -1234567890.123456789 -1234567890.123456789 -1255 -1255.49 -1255.49 -1255.49 -11 -1.122 -1.122 -1.122 -1 -1.12 -1.12 -2.24 -0 0.02538461538461538461538 0.02538461538462 0.3300000000 -1 1.0484 1.0484 5.2420000000 +0 0.02538461538461538461538 0.02538461538462 0.33 +1 1.0484 1.0484 5.242 2 2 2 4 3 3.14 3.14 9.42 -4 3.14 3.14 3.140 +4 3.14 3.14 3.14 10 10 10 10 20 20 20 20 100 100 100 100 -124 124 124 124.00 +124 124 124 124 125 125.2 125.2 125.2 200 200 200 200 4400 -4400 -4400 -4400 -1234567890 1234567890.12345678 1234567890.12345678 1234567890.1234567800 +1234567890 1234567890.12345678 1234567890.12345678 1234567890.12345678 PREHOOK: query: -- negative EXPLAIN SELECT -key FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -1702,7 +1702,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 4400 NULL -0.0000000000 +0 0 -100 -10 @@ -1721,7 +1721,7 @@ NULL 0.3 0.33 0.333 --1.0 +-1 -2 -3.14 1.12 @@ -1729,15 +1729,15 @@ NULL 1.122 -1.12 -1.122 --124.00 +-124 -125.2 1255.49 -3.14 -3.14 --3.140 --1.0000000000 -1234567890.1234567890 --1234567890.1234567800 +-3.14 +-1 +1234567890.123456789 +-1234567890.12345678 PREHOOK: query: -- positive EXPLAIN SELECT +key FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -1771,7 +1771,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -4400 NULL -0.0000000000 +0 0 100 10 @@ -1790,7 +1790,7 @@ NULL -0.3 -0.33 -0.333 -1.0 +1 2 3.14 -1.12 @@ -1798,15 +1798,15 @@ NULL -1.122 1.12 1.122 -124.00 +124 125.2 -1255.49 3.14 3.14 -3.140 -1.0000000000 --1234567890.1234567890 -1234567890.1234567800 +3.14 +1 +-1234567890.123456789 +1234567890.12345678 PREHOOK: query: -- ceiling EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -2015,42 +2015,42 @@ POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --4400.00 +-4400 NULL -0.00 -0.00 -100.00 -10.00 -1.00 -0.10 +0 +0 +100 +10 +1 +0.1 0.01 -200.00 -20.00 -2.00 -0.00 -0.20 +200 +20 +2 +0 +0.2 0.02 -0.30 +0.3 0.33 0.33 --0.30 +-0.3 -0.33 -0.33 -1.00 -2.00 +1 +2 3.14 -1.12 -1.12 -1.12 1.12 1.12 -124.00 -125.20 +124 +125.2 -1255.49 3.14 3.14 3.14 -1.00 +1 -1234567890.12 1234567890.12 PREHOOK: query: -- power @@ -2184,38 +2184,38 @@ NULL NULL 1 1 -0.0 -0.00 -0.000 +0 +0 +0 1 1 0 NULL -0.0 -0.00 -0.10 -0.010 -0.0010 -0.10 -0.010 -0.0010 -0.0 0 -1.00 +0 +0.1 +0.01 +0.001 +0.1 +0.01 +0.001 +0 +0 +1 -0.12 -0.12 -0.122 0.44 0.439 -1.00 -1.0 +1 +1 -626.745 -1.00 -1.00 -1.000 -0.0000000000 +1 +1 +1 +0 -617283944.0617283945 -1.0000000000 +1 PREHOOK: query: -- stddev, var EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value PREHOOK: type: QUERY @@ -2510,7 +2510,7 @@ POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --1234567890.1234567890 +-1234567890.123456789 PREHOOK: query: -- max EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -2574,7 +2574,7 @@ POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 
-1234567890.1234567800 +1234567890.12345678 PREHOOK: query: -- count EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out index 2343508..527a117 100644 --- a/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out +++ b/ql/src/test/results/clientpositive/vector_reduce_groupby_decimal.q.out @@ -134,7 +134,7 @@ POSTHOOK: Input: default@decimal_test -1066226047 -9439.0 -5637.8891891892 -6752.515384615385 -5637.8891891892 -1065117869 2538.0 1515.9405405405 1815.646153846154 1515.9405405405 -1064949302 6454.0 3854.9567567568 4617.092307692308 3854.9567567568 --1063498122 -11480.0 -6856.9729729730 -8212.615384615387 -6856.9729729730 +-1063498122 -11480.0 -6856.972972973 -8212.615384615387 -6856.972972973 -1062973443 10541.0 6296.1108108108 7540.869230769231 6296.1108108108 -1061614989 -4234.0 -2528.9567567568 -3028.938461538462 -2528.9567567568 -1061057428 -1085.0 -648.0675675676 -776.1923076923077 -648.0675675676 @@ -142,14 +142,14 @@ POSTHOOK: Input: default@decimal_test -1059338191 7322.0 4373.4108108108 5238.046153846154 4373.4108108108 -1059047258 12452.0 7437.5459459459 8907.969230769231 7437.5459459459 -1056684111 13991.0 8356.7864864865 10008.946153846155 8356.7864864865 --1055945837 13690.0 8177.0 9793.615384615387 8177.0 +-1055945837 13690.0 8177 9793.615384615387 8177 -1055669248 2570.0 1535.0540540541 1838.538461538462 1535.0540540541 -1055316250 -14990.0 -8953.4864864865 -10723.615384615385 -8953.4864864865 -1053385587 14504.0 8663.2 10375.938461538462 8663.2 -1053238077 -3704.0 -2212.3891891892 -2649.784615384616 -2212.3891891892 -1052745800 -12404.0 -7408.8756756757 -8873.630769230771 -7408.8756756757 -1052322972 -7433.0 -4439.7108108108 -5317.453846153847 -4439.7108108108 --1050684541 -8261.0 -4934.2729729730 -5909.792307692308 -4934.2729729730 +-1050684541 -8261.0 -4934.272972973 -5909.792307692308 -4934.272972973 -1050657303 -6999.0 -4180.4837837838 -5006.976923076923 -4180.4837837838 -1050165799 8634.0 5157.0648648649 6176.63076923077 5157.0648648649 -1048934049 -524.0 -312.9837837838 -374.86153846153854 -312.9837837838 @@ -160,12 +160,12 @@ POSTHOOK: Input: default@decimal_test -1045087657 -5865.0 -3503.1486486486 -4195.7307692307695 -3503.1486486486 -1044207190 5381.0 3214.0567567568 3849.4846153846156 3214.0567567568 -1044093617 -3422.0 -2043.9513513514 -2448.046153846154 -2043.9513513514 --1043573508 16216.0 9685.7729729730 11600.676923076924 9685.7729729730 +-1043573508 16216.0 9685.772972973 11600.676923076924 9685.772972973 -1043132597 12302.0 7347.9513513514 8800.66153846154 7347.9513513514 -1043082182 9180.0 5483.1891891892 6567.2307692307695 5483.1891891892 --1042805968 5133.0 3065.9270270270 3672.0692307692307 3065.9270270270 +-1042805968 5133.0 3065.927027027 3672.0692307692307 3065.927027027 -1042712895 9296.0 5552.4756756757 6650.215384615385 5552.4756756757 --1042396242 9583.0 5723.9000000000 6855.53076923077 5723.9000000000 +-1042396242 9583.0 5723.9 6855.53076923077 5723.9 -1041734429 -836.0 -499.3405405405 -598.0615384615385 -499.3405405405 -1041391389 -12970.0 -7746.9459459459 -9278.538461538463 -7746.9459459459 -1041252354 756.0 451.5567567568 540.8307692307692 451.5567567568 diff --git a/ql/src/test/results/clientpositive/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/vectorized_ptf.q.out index 08de87a..f0e5b91 100644 --- 
a/ql/src/test/results/clientpositive/vectorized_ptf.q.out +++ b/ql/src/test/results/clientpositive/vectorized_ptf.q.out @@ -259,7 +259,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -279,7 +279,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -605,7 +605,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -625,7 +625,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -942,7 +942,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -962,7 +962,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -1193,7 +1193,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1213,7 +1213,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: 
default.part_orc @@ -1522,7 +1522,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1542,7 +1542,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -1861,7 +1861,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -1881,7 +1881,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -2212,7 +2212,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2232,7 +2232,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -2341,7 +2341,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2361,7 +2361,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -2559,7 +2559,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 
p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2579,7 +2579,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -2688,7 +2688,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2708,7 +2708,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -2917,7 +2917,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -2937,7 +2937,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -3235,7 +3235,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -3255,7 +3255,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -3555,7 +3555,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string 
p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -3575,7 +3575,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -3885,7 +3885,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -3905,7 +3905,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -4299,7 +4299,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -4319,7 +4319,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -4726,7 +4726,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -4746,7 +4746,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -4855,7 +4855,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib 
org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -4875,7 +4875,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -5133,7 +5133,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -5153,7 +5153,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -5455,7 +5455,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -5475,7 +5475,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -6006,7 +6006,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -6026,7 +6026,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -6646,7 +6646,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked 
pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -6666,7 +6666,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -7091,7 +7091,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -7111,7 +7111,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -7579,7 +7579,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -7599,7 +7599,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -8017,7 +8017,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -8037,7 +8037,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -8550,7 +8550,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -8570,7 +8570,7 @@ 
STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc @@ -8987,7 +8987,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde @@ -9007,7 +9007,7 @@ STAGE PLANS: serialization.ddl struct part_orc { i32 p_partkey, string p_name, string p_mfgr, string p_brand, string p_type, i32 p_size, string p_container, double p_retailprice, string p_comment} serialization.format 1 serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde - totalSize 2597 + totalSize 2599 #### A masked pattern was here #### serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde name: default.part_orc diff --git a/ql/src/test/results/clientpositive/windowing_decimal.q.out b/ql/src/test/results/clientpositive/windowing_decimal.q.out index 194a916..60563ba 100644 --- a/ql/src/test/results/clientpositive/windowing_decimal.q.out +++ b/ql/src/test/results/clientpositive/windowing_decimal.q.out @@ -57,8 +57,8 @@ from part_dec POSTHOOK: type: QUERY POSTHOOK: Input: default@part_dec #### A masked pattern was here #### -Manufacturer#1 1173.15 1173.15 2346.30 -Manufacturer#1 1173.15 1173.15 2346.30 +Manufacturer#1 1173.15 1173.15 2346.3 +Manufacturer#1 1173.15 1173.15 2346.3 Manufacturer#1 1414.42 1173.15 3760.72 Manufacturer#1 1602.59 1173.15 5363.31 Manufacturer#1 1632.66 1173.15 6995.97 @@ -76,7 +76,7 @@ Manufacturer#3 1922.98 1190.27 7532.61 Manufacturer#4 1206.26 1206.26 1206.26 Manufacturer#4 1290.35 1206.26 2496.61 Manufacturer#4 1375.42 1206.26 3872.03 -Manufacturer#4 1620.67 1206.26 5492.70 +Manufacturer#4 1620.67 1206.26 5492.7 Manufacturer#4 1844.92 1206.26 7337.62 Manufacturer#5 1018.1 1018.1 1018.1 Manufacturer#5 1464.48 1018.1 2482.58 @@ -97,8 +97,8 @@ from part_dec POSTHOOK: type: QUERY POSTHOOK: Input: default@part_dec #### A masked pattern was here #### -Manufacturer#1 1173.15 1173.15 2346.30 -Manufacturer#1 1173.15 1173.15 2346.30 +Manufacturer#1 1173.15 1173.15 2346.3 +Manufacturer#1 1173.15 1173.15 2346.3 Manufacturer#1 1414.42 1414.42 1414.42 Manufacturer#1 1602.59 1602.59 1602.59 Manufacturer#1 1632.66 1632.66 1632.66 diff --git a/ql/src/test/results/clientpositive/windowing_navfn.q.out b/ql/src/test/results/clientpositive/windowing_navfn.q.out index 4256640..531ab6b 100644 --- a/ql/src/test/results/clientpositive/windowing_navfn.q.out +++ b/ql/src/test/results/clientpositive/windowing_navfn.q.out @@ -277,13 +277,13 @@ POSTHOOK: Input: default@over10k 65536 98.42 65536 0.93 65536 83.48 -65536 75.70 +65536 75.7 65536 88.04 65536 94.09 65536 33.45 65536 44.41 65536 22.15 -65536 20.50 +65536 20.5 65536 58.86 65536 30.91 65536 74.47 @@ -300,9 +300,9 @@ POSTHOOK: Input: default@over10k 65536 80.26 65536 35.07 65536 95.88 -65536 30.60 +65536 30.6 65536 46.97 -65536 58.80 +65536 58.8 65536 5.72 65536 29.27 65536 62.25 @@ -326,7 +326,7 @@ POSTHOOK: Input: default@over10k 65537 35.86 65537 47.75 65537 1.12 -65537 52.90 
+65537 52.9 65537 53.92 65537 43.45 65537 7.52 @@ -340,20 +340,20 @@ POSTHOOK: Input: default@over10k 65537 56.48 65537 83.21 65537 56.52 -65537 36.60 -65537 59.70 +65537 36.6 +65537 59.7 65537 80.14 -65537 66.30 +65537 66.3 65537 94.87 65537 40.92 -65537 25.20 +65537 25.2 65537 7.36 65538 NULL 65538 53.35 65538 54.64 65538 76.67 65538 15.17 -65538 1.20 +65538 1.2 65538 13.71 65538 81.59 65538 43.33 diff --git a/ql/src/test/results/clientpositive/windowing_rank.q.out b/ql/src/test/results/clientpositive/windowing_rank.q.out index 67975f3..6a74a8e 100644 --- a/ql/src/test/results/clientpositive/windowing_rank.q.out +++ b/ql/src/test/results/clientpositive/windowing_rank.q.out @@ -508,16 +508,16 @@ where rnk = 1 limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@over10k #### A masked pattern was here #### -2013-03-01 09:11:58.70307 0.50 1 -2013-03-01 09:11:58.70307 0.50 1 -2013-03-01 09:11:58.70307 0.50 1 -2013-03-01 09:11:58.70307 0.50 1 -2013-03-01 09:11:58.70307 0.50 1 -2013-03-01 09:11:58.70307 0.50 1 -2013-03-01 09:11:58.70307 0.50 1 -2013-03-01 09:11:58.70307 0.50 1 -2013-03-01 09:11:58.70307 0.50 1 -2013-03-01 09:11:58.70307 0.50 1 +2013-03-01 09:11:58.70307 0.5 1 +2013-03-01 09:11:58.70307 0.5 1 +2013-03-01 09:11:58.70307 0.5 1 +2013-03-01 09:11:58.70307 0.5 1 +2013-03-01 09:11:58.70307 0.5 1 +2013-03-01 09:11:58.70307 0.5 1 +2013-03-01 09:11:58.70307 0.5 1 +2013-03-01 09:11:58.70307 0.5 1 +2013-03-01 09:11:58.70307 0.5 1 +2013-03-01 09:11:58.70307 0.5 1 PREHOOK: query: select ts, dec, rnk from (select ts, dec, @@ -546,16 +546,16 @@ where dec = 89.5 limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@over10k #### A masked pattern was here #### -2013-03-01 09:11:58.703124 89.50 1 -2013-03-01 09:11:58.703124 89.50 1 -2013-03-01 09:11:58.703124 89.50 1 -2013-03-01 09:11:58.703124 89.50 1 -2013-03-01 09:11:58.703124 89.50 1 -2013-03-01 09:11:58.703124 89.50 1 -2013-03-01 09:11:58.703124 89.50 1 -2013-03-01 09:11:58.703124 89.50 1 -2013-03-01 09:11:58.703124 89.50 1 -2013-03-01 09:11:58.703124 89.50 1 +2013-03-01 09:11:58.703124 89.5 1 +2013-03-01 09:11:58.703124 89.5 1 +2013-03-01 09:11:58.703124 89.5 1 +2013-03-01 09:11:58.703124 89.5 1 +2013-03-01 09:11:58.703124 89.5 1 +2013-03-01 09:11:58.703124 89.5 1 +2013-03-01 09:11:58.703124 89.5 1 +2013-03-01 09:11:58.703124 89.5 1 +2013-03-01 09:11:58.703124 89.5 1 +2013-03-01 09:11:58.703124 89.5 1 PREHOOK: query: select ts, dec, rnk from (select ts, dec, @@ -586,13 +586,13 @@ where rnk = 1 limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@over10k #### A masked pattern was here #### -2013-03-01 09:11:58.70307 37.30 1 -2013-03-01 09:11:58.70307 37.30 1 -2013-03-01 09:11:58.70307 37.30 1 -2013-03-01 09:11:58.70307 37.30 1 -2013-03-01 09:11:58.70307 37.30 1 -2013-03-01 09:11:58.70307 37.30 1 -2013-03-01 09:11:58.70307 37.30 1 -2013-03-01 09:11:58.70307 37.30 1 -2013-03-01 09:11:58.70307 37.30 1 -2013-03-01 09:11:58.70307 37.30 1 +2013-03-01 09:11:58.70307 37.3 1 +2013-03-01 09:11:58.70307 37.3 1 +2013-03-01 09:11:58.70307 37.3 1 +2013-03-01 09:11:58.70307 37.3 1 +2013-03-01 09:11:58.70307 37.3 1 +2013-03-01 09:11:58.70307 37.3 1 +2013-03-01 09:11:58.70307 37.3 1 +2013-03-01 09:11:58.70307 37.3 1 +2013-03-01 09:11:58.70307 37.3 1 +2013-03-01 09:11:58.70307 37.3 1 diff --git a/ql/src/test/results/compiler/parse/case_sensitivity.q.out b/ql/src/test/results/compiler/parse/case_sensitivity.q.out index c14fb1a..e69de29 100644 --- a/ql/src/test/results/compiler/parse/case_sensitivity.q.out +++ 
b/ql/src/test/results/compiler/parse/case_sensitivity.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME SRC_THRIFT))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR ([ (. (TOK_TABLE_OR_COL src_Thrift) LINT) 1)) (TOK_SELEXPR (. ([ (. (TOK_TABLE_OR_COL src_thrift) lintstring) 0) MYSTRING))) (TOK_WHERE (> ([ (. (TOK_TABLE_OR_COL src_thrift) liNT) 0) 0)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/cast1.q.out b/ql/src/test/results/compiler/parse/cast1.q.out index 5d37c9d..e69de29 100644 --- a/ql/src/test/results/compiler/parse/cast1.q.out +++ b/ql/src/test/results/compiler/parse/cast1.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (+ 3 2)) (TOK_SELEXPR (+ 3.0 2)) (TOK_SELEXPR (+ 3 2.0)) (TOK_SELEXPR (+ 3.0 2.0)) (TOK_SELEXPR (+ 3 (TOK_FUNCTION TOK_INT 2.0))) (TOK_SELEXPR (TOK_FUNCTION TOK_BOOLEAN 1)) (TOK_SELEXPR (TOK_FUNCTION TOK_INT TRUE))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL src) key) 86)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/groupby1.q.out b/ql/src/test/results/compiler/parse/groupby1.q.out index 1aa680c..e69de29 100755 --- a/ql/src/test/results/compiler/parse/groupby1.q.out +++ b/ql/src/test/results/compiler/parse/groupby1.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/groupby2.q.out b/ql/src/test/results/compiler/parse/groupby2.q.out index 2c70804..e69de29 100755 --- a/ql/src/test/results/compiler/parse/groupby2.q.out +++ b/ql/src/test/results/compiler/parse/groupby2.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)) (TOK_SELEXPR (TOK_FUNCTIONDI count (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION concat (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1) (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/groupby3.q.out b/ql/src/test/results/compiler/parse/groupby3.q.out index 057c101..e69de29 100644 --- a/ql/src/test/results/compiler/parse/groupby3.q.out +++ b/ql/src/test/results/compiler/parse/groupby3.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTIONDI avg (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION max (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5))) (TOK_SELEXPR (TOK_FUNCTION min (TOK_FUNCTION substr (. 
(TOK_TABLE_OR_COL src) value) 5)))))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/groupby4.q.out b/ql/src/test/results/compiler/parse/groupby4.q.out index d555d20..e69de29 100644 --- a/ql/src/test/results/compiler/parse/groupby4.q.out +++ b/ql/src/test/results/compiler/parse/groupby4.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1))) (TOK_GROUPBY (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) key) 1 1)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/groupby5.q.out b/ql/src/test/results/compiler/parse/groupby5.q.out index 0aa6914..e69de29 100644 --- a/ql/src/test/results/compiler/parse/groupby5.q.out +++ b/ql/src/test/results/compiler/parse/groupby5.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (TOK_FUNCTION sum (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5)))) (TOK_GROUPBY (. (TOK_TABLE_OR_COL src) key)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/groupby6.q.out b/ql/src/test/results/compiler/parse/groupby6.q.out index 52fa9b4..e69de29 100644 --- a/ql/src/test/results/compiler/parse/groupby6.q.out +++ b/ql/src/test/results/compiler/parse/groupby6.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECTDI (TOK_SELEXPR (TOK_FUNCTION substr (. (TOK_TABLE_OR_COL src) value) 5 1))))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input1.q.out b/ql/src/test/results/compiler/parse/input1.q.out index fa3613e..e69de29 100755 --- a/ql/src/test/results/compiler/parse/input1.q.out +++ b/ql/src/test/results/compiler/parse/input1.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input2.q.out b/ql/src/test/results/compiler/parse/input2.q.out index 153f517..e69de29 100755 --- a/ql/src/test/results/compiler/parse/input2.q.out +++ b/ql/src/test/results/compiler/parse/input2.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (and (>= (. (TOK_TABLE_OR_COL src) key) 100) (< (. (TOK_TABLE_OR_COL src) key) 200)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest3) (TOK_PARTSPEC (TOK_PARTVAL ds '2008-04-08') (TOK_PARTVAL hr '12')))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR 2)) (TOK_WHERE (>= (. 
(TOK_TABLE_OR_COL src) key) 200)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input20.q.out b/ql/src/test/results/compiler/parse/input20.q.out index da4c1af..e69de29 100644 --- a/ql/src/test/results/compiler/parse/input20.q.out +++ b/ql/src/test/results/compiler/parse/input20.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (% (. (TOK_TABLE_OR_COL src) key) 2) (% (. (TOK_TABLE_OR_COL src) key) 5)) TOK_SERDE TOK_RECORDWRITER 'cat' TOK_SERDE TOK_RECORDREADER))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL key)))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL tmap) key) (. (TOK_TABLE_OR_COL tmap) value)) TOK_SERDE TOK_RECORDWRITER 'uniq -c | sed "s@^ *@@" | sed "s@\t@_@" | sed "s@ @\t@"' TOK_SERDE TOK_RECORDREADER (TOK_ALIASLIST key value)))))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input3.q.out b/ql/src/test/results/compiler/parse/input3.q.out index b033f3a..e69de29 100755 --- a/ql/src/test/results/compiler/parse/input3.q.out +++ b/ql/src/test/results/compiler/parse/input3.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest2))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (and (>= (. (TOK_TABLE_OR_COL src) key) 100) (< (. (TOK_TABLE_OR_COL src) key) 200)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest3) (TOK_PARTSPEC (TOK_PARTVAL ds '2008-04-08') (TOK_PARTVAL hr '12')))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR 2)) (TOK_WHERE (and (>= (. (TOK_TABLE_OR_COL src) key) 200) (< (. (TOK_TABLE_OR_COL src) key) 300)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR '../../../../build/contrib/hive/ql/test/data/warehouse/dest4.out')) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (>= (. (TOK_TABLE_OR_COL src) key) 300)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input4.q.out b/ql/src/test/results/compiler/parse/input4.q.out index c1425e7..e69de29 100755 --- a/ql/src/test/results/compiler/parse/input4.q.out +++ b/ql/src/test/results/compiler/parse/input4.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src) key) (. (TOK_TABLE_OR_COL src) value)) TOK_SERDE TOK_RECORDWRITER '/bin/cat' TOK_SERDE TOK_RECORDREADER (TOK_ALIASLIST tkey tvalue)))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL tkey)))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tkey)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tvalue))) (TOK_WHERE (< (. 
(TOK_TABLE_OR_COL tmap) tkey) 100)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input5.q.out b/ql/src/test/results/compiler/parse/input5.q.out index 854907b..e69de29 100644 --- a/ql/src/test/results/compiler/parse/input5.q.out +++ b/ql/src/test/results/compiler/parse/input5.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src_thrift))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_TRANSFORM (TOK_EXPLIST (. (TOK_TABLE_OR_COL src_thrift) lint) (. (TOK_TABLE_OR_COL src_thrift) lintstring)) TOK_SERDE TOK_RECORDWRITER '/bin/cat' TOK_SERDE TOK_RECORDREADER (TOK_ALIASLIST tkey tvalue)))) (TOK_CLUSTERBY (TOK_TABLE_OR_COL tkey)))) tmap)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tkey)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL tmap) tvalue))))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input6.q.out b/ql/src/test/results/compiler/parse/input6.q.out index 35f73a6..e69de29 100644 --- a/ql/src/test/results/compiler/parse/input6.q.out +++ b/ql/src/test/results/compiler/parse/input6.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value))) (TOK_WHERE (TOK_FUNCTION TOK_ISNULL (. (TOK_TABLE_OR_COL src1) key))))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input7.q.out b/ql/src/test/results/compiler/parse/input7.q.out index a26964d..e69de29 100644 --- a/ql/src/test/results/compiler/parse/input7.q.out +++ b/ql/src/test/results/compiler/parse/input7.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR TOK_NULL) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key))))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input8.q.out b/ql/src/test/results/compiler/parse/input8.q.out index a9a3bd1..e69de29 100644 --- a/ql/src/test/results/compiler/parse/input8.q.out +++ b/ql/src/test/results/compiler/parse/input8.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (+ 4 TOK_NULL)) (TOK_SELEXPR (- (. (TOK_TABLE_OR_COL src1) key) TOK_NULL)) (TOK_SELEXPR (+ TOK_NULL TOK_NULL))))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input9.q.out b/ql/src/test/results/compiler/parse/input9.q.out index b24ef54..e69de29 100644 --- a/ql/src/test/results/compiler/parse/input9.q.out +++ b/ql/src/test/results/compiler/parse/input9.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src1))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR TOK_NULL) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key))) (TOK_WHERE (= TOK_NULL TOK_NULL)))) \ No newline at end of file diff --git a/ql/src/test/results/compiler/parse/input_part1.q.out b/ql/src/test/results/compiler/parse/input_part1.q.out index 880e0f8..e69de29 100644 --- a/ql/src/test/results/compiler/parse/input_part1.q.out +++ b/ql/src/test/results/compiler/parse/input_part1.q.out @@ -1 +0,0 @@ -(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. 
(TOK_TABLE_OR_COL srcpart) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) value)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) hr)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL srcpart) ds))) (TOK_WHERE (and (and (< (. (TOK_TABLE_OR_COL srcpart) key) 100) (= (. (TOK_TABLE_OR_COL srcpart) ds) '2008-04-08')) (= (. (TOK_TABLE_OR_COL srcpart) hr) '12')))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/input_testsequencefile.q.out b/ql/src/test/results/compiler/parse/input_testsequencefile.q.out
index 0b80da4..e69de29 100644
--- a/ql/src/test/results/compiler/parse/input_testsequencefile.q.out
+++ b/ql/src/test/results/compiler/parse/input_testsequencefile.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest4_sequencefile))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value)))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/input_testxpath.q.out b/ql/src/test/results/compiler/parse/input_testxpath.q.out
index aeb505b..e69de29 100644
--- a/ql/src/test/results/compiler/parse/input_testxpath.q.out
+++ b/ql/src/test/results/compiler/parse/input_testxpath.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src_thrift))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR ([ (. (TOK_TABLE_OR_COL src_thrift) lint) 1)) (TOK_SELEXPR (. ([ (. (TOK_TABLE_OR_COL src_thrift) lintstring) 0) mystring)) (TOK_SELEXPR ([ (. (TOK_TABLE_OR_COL src_thrift) mstringstring) 'key_2')))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/input_testxpath2.q.out b/ql/src/test/results/compiler/parse/input_testxpath2.q.out
index d4c4610..e69de29 100644
--- a/ql/src/test/results/compiler/parse/input_testxpath2.q.out
+++ b/ql/src/test/results/compiler/parse/input_testxpath2.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src_thrift))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION size (. (TOK_TABLE_OR_COL src_thrift) lint))) (TOK_SELEXPR (TOK_FUNCTION size (. (TOK_TABLE_OR_COL src_thrift) lintstring))) (TOK_SELEXPR (TOK_FUNCTION size (. (TOK_TABLE_OR_COL src_thrift) mstringstring)))) (TOK_WHERE (AND (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL src_thrift) lint)) (NOT (TOK_FUNCTION TOK_ISNULL (. (TOK_TABLE_OR_COL src_thrift) mstringstring)))))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/join1.q.out b/ql/src/test/results/compiler/parse/join1.q.out
index 1c45f30..e69de29 100644
--- a/ql/src/test/results/compiler/parse/join1.q.out
+++ b/ql/src/test/results/compiler/parse/join1.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value)))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/join2.q.out b/ql/src/test/results/compiler/parse/join2.q.out
index debbac3..e69de29 100644
--- a/ql/src/test/results/compiler/parse/join2.q.out
+++ b/ql/src/test/results/compiler/parse/join2.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key))) (TOK_TABREF (TOK_TABNAME src) src3) (= (+ (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key)) (. (TOK_TABLE_OR_COL src3) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) value)))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/join3.q.out b/ql/src/test/results/compiler/parse/join3.q.out
index 51baa24..e69de29 100644
--- a/ql/src/test/results/compiler/parse/join3.q.out
+++ b/ql/src/test/results/compiler/parse/join3.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_JOIN (TOK_JOIN (TOK_TABREF (TOK_TABNAME src) src1) (TOK_TABREF (TOK_TABNAME src) src2) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src2) key))) (TOK_TABREF (TOK_TABNAME src) src3) (= (. (TOK_TABLE_OR_COL src1) key) (. (TOK_TABLE_OR_COL src3) key)))) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) value)))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/join4.q.out b/ql/src/test/results/compiler/parse/join4.q.out
index 77f3486..e69de29 100644
--- a/ql/src/test/results/compiler/parse/join4.q.out
+++ b/ql/src/test/results/compiler/parse/join4.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) c2)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src1) key) 20))))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) c4)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src2) key) 15) (< (. (TOK_TABLE_OR_COL src2) key) 25))))) b) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL b) c3)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c1) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c2) c2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c3) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c4) c4)))) c)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c1)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c3)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c4)))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/join5.q.out b/ql/src/test/results/compiler/parse/join5.q.out
index 4db56a4..e69de29 100644
--- a/ql/src/test/results/compiler/parse/join5.q.out
+++ b/ql/src/test/results/compiler/parse/join5.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_RIGHTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) c2)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src1) key) 20))))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) c4)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src2) key) 15) (< (. (TOK_TABLE_OR_COL src2) key) 25))))) b) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL b) c3)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c1) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c2) c2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c3) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c4) c4)))) c)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c1)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c3)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c4)))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/join6.q.out b/ql/src/test/results/compiler/parse/join6.q.out
index 3114d62..e69de29 100644
--- a/ql/src/test/results/compiler/parse/join6.q.out
+++ b/ql/src/test/results/compiler/parse/join6.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_FULLOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) c2)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src1) key) 20))))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) c4)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src2) key) 15) (< (. (TOK_TABLE_OR_COL src2) key) 25))))) b) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL b) c3)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c1) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c2) c2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c3) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c4) c4)))) c)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c1)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c3)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c4)))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/join7.q.out b/ql/src/test/results/compiler/parse/join7.q.out
index 9b12cec..e69de29 100644
--- a/ql/src/test/results/compiler/parse/join7.q.out
+++ b/ql/src/test/results/compiler/parse/join7.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_FULLOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) c2)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src1) key) 20))))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) c4)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src2) key) 15) (< (. (TOK_TABLE_OR_COL src2) key) 25))))) b) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL b) c3))) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src3)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) key) c5) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src3) value) c6)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src3) key) 20) (< (. (TOK_TABLE_OR_COL src3) key) 25))))) c) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL c) c5)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c1) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c2) c2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c3) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c4) c4) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c5) c5) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c6) c6)))) c)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c1)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c3)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c4)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c5)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c6)))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/join8.q.out b/ql/src/test/results/compiler/parse/join8.q.out
index aa4e64a..e69de29 100644
--- a/ql/src/test/results/compiler/parse/join8.q.out
+++ b/ql/src/test/results/compiler/parse/join8.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_LEFTOUTERJOIN (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src1)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) key) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src1) value) c2)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src1) key) 10) (< (. (TOK_TABLE_OR_COL src1) key) 20))))) a) (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src) src2)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) key) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src2) value) c4)) (TOK_WHERE (and (> (. (TOK_TABLE_OR_COL src2) key) 15) (< (. (TOK_TABLE_OR_COL src2) key) 25))))) b) (= (. (TOK_TABLE_OR_COL a) c1) (. (TOK_TABLE_OR_COL b) c3)))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c1) c1) (TOK_SELEXPR (. (TOK_TABLE_OR_COL a) c2) c2) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c3) c3) (TOK_SELEXPR (. (TOK_TABLE_OR_COL b) c4) c4)))) c)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c1)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c2)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c3)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL c) c4))) (TOK_WHERE (AND (TOK_FUNCTION TOK_ISNULL (. (TOK_TABLE_OR_COL c) c3)) (TOK_FUNCTION TOK_ISNOTNULL (. (TOK_TABLE_OR_COL c) c1))))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/sample1.q.out b/ql/src/test/results/compiler/parse/sample1.q.out
index 9a493ad..e69de29 100644
--- a/ql/src/test/results/compiler/parse/sample1.q.out
+++ b/ql/src/test/results/compiler/parse/sample1.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcpart) (TOK_TABLEBUCKETSAMPLE 1 1 (TOK_FUNCTION rand)) s)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME s)))) (TOK_WHERE (and (= (. (TOK_TABLE_OR_COL s) ds) '2008-04-08') (= (. (TOK_TABLE_OR_COL s) hr) '11')))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/sample2.q.out b/ql/src/test/results/compiler/parse/sample2.q.out
index e67c761..e69de29 100644
--- a/ql/src/test/results/compiler/parse/sample2.q.out
+++ b/ql/src/test/results/compiler/parse/sample2.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcbucket) (TOK_TABLEBUCKETSAMPLE 1 2) s)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME s))))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/sample3.q.out b/ql/src/test/results/compiler/parse/sample3.q.out
index ad5855b..e69de29 100644
--- a/ql/src/test/results/compiler/parse/sample3.q.out
+++ b/ql/src/test/results/compiler/parse/sample3.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcbucket) (TOK_TABLEBUCKETSAMPLE 1 2 (TOK_TABLE_OR_COL key) (TOK_TABLE_OR_COL value)) s)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME s))))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/sample4.q.out b/ql/src/test/results/compiler/parse/sample4.q.out
index 790b009..e69de29 100644
--- a/ql/src/test/results/compiler/parse/sample4.q.out
+++ b/ql/src/test/results/compiler/parse/sample4.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcbucket) (TOK_TABLEBUCKETSAMPLE 1 2 (TOK_TABLE_OR_COL key)) s)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME s))))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/sample5.q.out b/ql/src/test/results/compiler/parse/sample5.q.out
index cb55074..e69de29 100644
--- a/ql/src/test/results/compiler/parse/sample5.q.out
+++ b/ql/src/test/results/compiler/parse/sample5.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcbucket) (TOK_TABLEBUCKETSAMPLE 1 5 (TOK_TABLE_OR_COL key)) s)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME s))))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/sample6.q.out b/ql/src/test/results/compiler/parse/sample6.q.out
index 3562bb8..e69de29 100644
--- a/ql/src/test/results/compiler/parse/sample6.q.out
+++ b/ql/src/test/results/compiler/parse/sample6.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcbucket) (TOK_TABLEBUCKETSAMPLE 1 4 (TOK_TABLE_OR_COL key)) s)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME s))))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/sample7.q.out b/ql/src/test/results/compiler/parse/sample7.q.out
index 6bcf840..e69de29 100644
--- a/ql/src/test/results/compiler/parse/sample7.q.out
+++ b/ql/src/test/results/compiler/parse/sample7.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME srcbucket) (TOK_TABLEBUCKETSAMPLE 1 4 (TOK_TABLE_OR_COL key)) s)) (TOK_INSERT (TOK_DESTINATION (TOK_TAB (TOK_TABNAME dest1))) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME s)))) (TOK_WHERE (> (. (TOK_TABLE_OR_COL s) key) 100))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/subq.q.out b/ql/src/test/results/compiler/parse/subq.q.out
index afdcb57..e69de29 100644
--- a/ql/src/test/results/compiler/parse/subq.q.out
+++ b/ql/src/test/results/compiler/parse/subq.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100)))) unioninput)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR '../build/ql/test/data/warehouse/union.out')) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME unioninput))))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/udf1.q.out b/ql/src/test/results/compiler/parse/udf1.q.out
index a9084e1..e69de29 100644
--- a/ql/src/test/results/compiler/parse/udf1.q.out
+++ b/ql/src/test/results/compiler/parse/udf1.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (LIKE 'a' '%a%')) (TOK_SELEXPR (LIKE 'b' '%a%')) (TOK_SELEXPR (LIKE 'ab' '%a%')) (TOK_SELEXPR (LIKE 'ab' '%a_')) (TOK_SELEXPR (LIKE '%_' '\%\_')) (TOK_SELEXPR (LIKE 'ab' '\%\_')) (TOK_SELEXPR (LIKE 'ab' '_a%')) (TOK_SELEXPR (LIKE 'ab' 'a')) (TOK_SELEXPR (RLIKE '' '.*')) (TOK_SELEXPR (RLIKE 'a' '[ab]')) (TOK_SELEXPR (RLIKE '' '[ab]')) (TOK_SELEXPR (RLIKE 'hadoop' '[a-z]*')) (TOK_SELEXPR (RLIKE 'hadoop' 'o*')) (TOK_SELEXPR (TOK_FUNCTION REGEXP_REPLACE 'abc' 'b' 'c')) (TOK_SELEXPR (TOK_FUNCTION REGEXP_REPLACE 'abc' 'z' 'a')) (TOK_SELEXPR (TOK_FUNCTION REGEXP_REPLACE 'abbbb' 'bb' 'b')) (TOK_SELEXPR (TOK_FUNCTION REGEXP_REPLACE 'hadoop' '(.)[a-z]*' '$1ive'))) (TOK_WHERE (= (. (TOK_TABLE_OR_COL src) key) 86))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/udf4.q.out b/ql/src/test/results/compiler/parse/udf4.q.out
index 6065692..e69de29 100644
--- a/ql/src/test/results/compiler/parse/udf4.q.out
+++ b/ql/src/test/results/compiler/parse/udf4.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME dest1))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION round 1.0)) (TOK_SELEXPR (TOK_FUNCTION round 1.5)) (TOK_SELEXPR (TOK_FUNCTION round (- 1.5))) (TOK_SELEXPR (TOK_FUNCTION floor 1.0)) (TOK_SELEXPR (TOK_FUNCTION floor 1.5)) (TOK_SELEXPR (TOK_FUNCTION floor (- 1.5))) (TOK_SELEXPR (TOK_FUNCTION sqrt 1.0)) (TOK_SELEXPR (TOK_FUNCTION sqrt (- 1.0))) (TOK_SELEXPR (TOK_FUNCTION sqrt 0.0)) (TOK_SELEXPR (TOK_FUNCTION ceil 1.0)) (TOK_SELEXPR (TOK_FUNCTION ceil 1.5)) (TOK_SELEXPR (TOK_FUNCTION ceil (- 1.5))) (TOK_SELEXPR (TOK_FUNCTION ceiling 1.0)) (TOK_SELEXPR (TOK_FUNCTION rand 3)) (TOK_SELEXPR (+ 3)) (TOK_SELEXPR (- 3)) (TOK_SELEXPR (+ 1 (+ 2))) (TOK_SELEXPR (+ 1 (- 2))) (TOK_SELEXPR (~ 1)))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/udf6.q.out b/ql/src/test/results/compiler/parse/udf6.q.out
index 4adc7ba..e69de29 100644
--- a/ql/src/test/results/compiler/parse/udf6.q.out
+++ b/ql/src/test/results/compiler/parse/udf6.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION CONCAT 'a' 'b')) (TOK_SELEXPR (+ (TOK_FUNCTION IF TRUE 1 2) (TOK_TABLE_OR_COL key))))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/udf_case.q.out b/ql/src/test/results/compiler/parse/udf_case.q.out
index f8de5fc..e69de29 100644
--- a/ql/src/test/results/compiler/parse/udf_case.q.out
+++ b/ql/src/test/results/compiler/parse/udf_case.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION CASE 1 1 2 3 4 5)) (TOK_SELEXPR (TOK_FUNCTION CASE 11 12 13 14 15))) (TOK_LIMIT 1)))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/udf_when.q.out b/ql/src/test/results/compiler/parse/udf_when.q.out
index a527fa4..e69de29 100644
--- a/ql/src/test/results/compiler/parse/udf_when.q.out
+++ b/ql/src/test/results/compiler/parse/udf_when.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION WHEN (= 1 1) 2 (= 3 5) 4 5)) (TOK_SELEXPR (TOK_FUNCTION WHEN (= 12 11) 13 (= 14 10) 15))) (TOK_LIMIT 1)))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/parse/union.q.out b/ql/src/test/results/compiler/parse/union.q.out
index d9be739..e69de29 100644
--- a/ql/src/test/results/compiler/parse/union.q.out
+++ b/ql/src/test/results/compiler/parse/union.q.out
@@ -1 +0,0 @@
-(TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_UNION (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) key)) (TOK_SELEXPR (. (TOK_TABLE_OR_COL src) value))) (TOK_WHERE (< (. (TOK_TABLE_OR_COL src) key) 100)))) (TOK_QUERY (TOK_FROM (TOK_TABREF (TOK_TABNAME src))) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME src)))) (TOK_WHERE (> (. (TOK_TABLE_OR_COL src) key) 100))))) unioninput)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR '../build/ql/test/data/warehouse/union.out')) (TOK_SELECT (TOK_SELEXPR (TOK_ALLCOLREF (TOK_TABNAME unioninput))))))
\ No newline at end of file
diff --git a/ql/src/test/results/compiler/plan/case_sensitivity.q.xml b/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
index 53aea3c..e69de29 100644
--- a/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
+++ b/ql/src/test/results/compiler/plan/case_sensitivity.q.xml
@@ -1,1496 +0,0 @@
[1,496 deleted lines of serialized query-plan XML; the element markup was lost in extraction and the body is not reproduced here]
diff --git a/ql/src/test/results/compiler/plan/cast1.q.xml b/ql/src/test/results/compiler/plan/cast1.q.xml
index 1bba170..e69de29 100644
--- a/ql/src/test/results/compiler/plan/cast1.q.xml
+++ b/ql/src/test/results/compiler/plan/cast1.q.xml
@@ -1,1001 +0,0 @@
[1,001 deleted lines of serialized query-plan XML; body not reproduced here]
diff --git a/ql/src/test/results/compiler/plan/groupby1.q.xml b/ql/src/test/results/compiler/plan/groupby1.q.xml
index 3b7e69b..e69de29 100755
--- a/ql/src/test/results/compiler/plan/groupby1.q.xml
+++ b/ql/src/test/results/compiler/plan/groupby1.q.xml
@@ -1,1463 +0,0 @@
[1,463 deleted lines of serialized query-plan XML; body not reproduced here]
diff --git a/ql/src/test/results/compiler/plan/groupby2.q.xml b/ql/src/test/results/compiler/plan/groupby2.q.xml
index 3a1164c..e69de29 100755
--- a/ql/src/test/results/compiler/plan/groupby2.q.xml
+++ b/ql/src/test/results/compiler/plan/groupby2.q.xml
@@ -1,1706 +0,0 @@
[1,706 deleted lines of serialized query-plan XML; body not reproduced here]
diff --git a/ql/src/test/results/compiler/plan/groupby3.q.xml b/ql/src/test/results/compiler/plan/groupby3.q.xml
index b33a2c6..e69de29 100644
--- a/ql/src/test/results/compiler/plan/groupby3.q.xml
+++ b/ql/src/test/results/compiler/plan/groupby3.q.xml
@@ -1,2114 +0,0 @@
[2,114 deleted lines of serialized query-plan XML; body not reproduced here]
diff --git a/ql/src/test/results/compiler/plan/groupby4.q.xml b/ql/src/test/results/compiler/plan/groupby4.q.xml
index a61b055..e69de29 100644
--- a/ql/src/test/results/compiler/plan/groupby4.q.xml
+++ b/ql/src/test/results/compiler/plan/groupby4.q.xml
@@ -1,1138 +0,0 @@
diff --git a/ql/src/test/results/compiler/plan/groupby6.q.xml b/ql/src/test/results/compiler/plan/groupby6.q.xml
index 08bf86f..e69de29 100644
--- a/ql/src/test/results/compiler/plan/groupby6.q.xml
+++ b/ql/src/test/results/compiler/plan/groupby6.q.xml
@@ -1,1138 +0,0 @@
[... 1,138 deleted lines of serialized query-plan XML: the same shape as groupby4 but keyed on a UDFSubstr expression over src.value; map-side TS_0 and SEL_1 projecting value, HASH GBY_2, RS_3, reduce-side MERGEPARTIAL GBY_4, SEL_5 and FS_6 writing one string column ...]
diff --git a/ql/src/test/results/compiler/plan/input1.q.xml b/ql/src/test/results/compiler/plan/input1.q.xml
index 8b5d4fc..e69de29 100755
--- a/ql/src/test/results/compiler/plan/input1.q.xml
+++ b/ql/src/test/results/compiler/plan/input1.q.xml
@@ -1,1180 +0,0 @@
[... 1,180 deleted lines of serialized query-plan XML: a multi-stage plan (Stage-0 through Stage-7, including the conditional merge/move stages that use CombineHiveInputFormat and the HIVE_DEFAULT_LIST_BUCKETING keys) loading default.dest1; the map stage scans default.src with TS_0, filters with FIL_4 comparing key against the constant 100, projects key and value in SEL_2 and writes them with FS_3 ...]
diff --git a/ql/src/test/results/compiler/plan/input2.q.xml b/ql/src/test/results/compiler/plan/input2.q.xml
index f106d80..e69de29 100755
--- a/ql/src/test/results/compiler/plan/input2.q.xml
+++ b/ql/src/test/results/compiler/plan/input2.q.xml
@@ -1,2677 +0,0 @@
[... 2,677 deleted lines of serialized query-plan XML: a multi-insert plan (Stage-0 through Stage-21) over default.src with three filter/select/sink branches: FIL_1 (key vs. 100) into default.dest1 via SEL_2 and FS_3, FIL_4 (key vs. 100 and 200) into default.dest2 via SEL_5 and FS_6, and FIL_7 (key vs. 200) into the partitioned default.dest3 (ds=2008-04-08/hr=12) via SEL_8 and FS_9, plus the associated conditional move/merge stages ...]
diff --git a/ql/src/test/results/compiler/plan/input20.q.xml b/ql/src/test/results/compiler/plan/input20.q.xml
index 845367d..e69de29 100644
--- a/ql/src/test/results/compiler/plan/input20.q.xml
+++ b/ql/src/test/results/compiler/plan/input20.q.xml
@@ -1,1401 +0,0 @@
[... 1,401 deleted lines of serialized query-plan XML: a script/transform plan reading tmap:src; on the map side TS_0 and SEL_1 (two double-typed expressions over src.key built from the constants 2 and 5) feed script operator SCR_2, which pipes rows through `cat` via TextRecordReader/TextRecordWriter, then RS_3; on the reduce side script operator SCR_6 runs `uniq -c | sed "s@^ *@@" | sed "s@\t@_@" | sed "s@ @\t@"`, followed by SEL_4 and FS_7 writing _col0 and _col1 as strings ...]
diff --git a/ql/src/test/results/compiler/plan/input3.q.xml b/ql/src/test/results/compiler/plan/input3.q.xml
index 0152d46f..e69de29 100755
--- a/ql/src/test/results/compiler/plan/input3.q.xml
+++ b/ql/src/test/results/compiler/plan/input3.q.xml
@@ -1,3260 +0,0 @@
[... 3,260 deleted lines of serialized query-plan XML: a four-way multi-insert plan (Stage-0 through Stage-27) over default.src: FIL_1 (key vs. 100) into default.dest1 via SEL_2 and FS_3, FIL_4 (key vs. 100 and 200) into default.dest2 via SEL_5 and FS_6, FIL_7 (key vs. 200 and 300) into the partitioned default.dest3 (ds=2008-04-08/hr=12) via SEL_8 and FS_9, and FIL_10 (key vs. 300) projecting value through SEL_11 into a single string-column text output via FS_12, plus the conditional move/merge stages ...]
- - - - - - - - - - - - 3 - - - - #### A masked pattern was here #### - - - - - - - true - - - - - - 1 - - - ds=2008-04-08/hr=12/ - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_9 - - - - - - - - - - - - - - - - _col1 - - - - - - 2 - - - - - _col0 - - - key - - - src - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_8 - - - - - - - - - - - - - _col0 - - - src - - - - - - string - - - - - - - _col1 - - - - - - int - - - - - - - - - - - - - - - - - - - - - - - key - - - src - - - - - - - - - - - - - 200 - - - - - - - - - - - - - - - - - - - - - key - - - src - - - - - - - - - - - - - 300 - - - - - - - - - - - - - - - - - - - - - - - - - - - FIL_7 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 4 - - - - #### A masked pattern was here #### - - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_12 - - - - - - - - - - - - - - - - _col0 - - - value - - - src - - - - - - - - - - - - - - - - - - - - - _col0 - - - - - - - SEL_11 - - - - - - - - - - - - - value - - - _col0 - - - src - - - - - - string - - - - - - - - - - - - - - - - - - - key - - - src - - - - - - - - - - - - - 300 - - - - - - - - - - - - - - - - - FIL_10 - - - - - - - - - - - - - - - - - - - - - - - - - src - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_0 - - - - - key - - - value - - - - - - - - - - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src - - - - - - string - - - - - - - true - - - ROW__ID - - - src - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - true - - - - #### A masked pattern was here #### - - - src - - - - - - - #### A masked pattern was here #### - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/input4.q.xml b/ql/src/test/results/compiler/plan/input4.q.xml index f9d5340..e69de29 100755 --- a/ql/src/test/results/compiler/plan/input4.q.xml +++ b/ql/src/test/results/compiler/plan/input4.q.xml @@ -1,1367 +0,0 @@ - -#### A masked pattern was here #### - - - - - - - - - - - Stage-2 - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - Stage-0 - - - - - - - - - - - - - - - - HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME - - - HIVE_DEFAULT_LIST_BUCKETING_KEY - - - - - - - - - - - - - - - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - 
string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - - Stage-1 - - - - - true - - - - - tmap:src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - tmap:src - - - - - - - - - - - - - - - - - VALUE._col1 - - - _col1 - - - - - string - - - - - - - VALUE._col0 - - - _col0 - - - - - - - - KEY.reducesinkkey0 - - - _col0 - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col0 - - - _col1 - - - - - - - - - _col0 - - - - - - - - - - - - - UNSET - - - - - -1 - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string,string - - - escape.delim - \ - - - - - - - - - RS_3 - - - - - - - - - - - - - tkey - - - VALUE._col0 - - - tmap - - - - - - string - - - - - - - tvalue - - - VALUE._col1 - - - tmap - - - - - - string - - - - - - - - - - - - - - - - - - - - - - - - int - - - - - 100 - - - - - - - - - - - - boolean - - - - - - - - - FIL_8 - - - - - - - - - - - - - tkey - - - _col0 - - - - - - string - - - - - - - tvalue - - - _col1 - - - - - - string - - - - - - - - - 
- - - - - - - org.apache.hadoop.hive.ql.exec.TextRecordReader - - - org.apache.hadoop.hive.ql.exec.TextRecordWriter - - - org.apache.hadoop.hive.ql.exec.TextRecordReader - - - /bin/cat - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - field.delim - 9 - - - columns - KEY - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 9 - - - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - field.delim - 9 - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.DelimitedJSONSerDe - - - serialization.format - 9 - - - columns.types - string,string - - - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - field.delim - 9 - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 9 - - - columns.types - string,string - - - - - - - - - SCR_2 - - - - - - - - - - - - - - - - - - - - _col1 - - - value - - - src - - - - - - - - _col0 - - - key - - - src - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_1 - - - - - - - - - - - - - _col0 - - - - - - string - - - - - - - _col1 - - - - - - string - - - - - - - - - - - - - src - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_0 - - - - - key - - - value - - - - - - - - - - key - - - src - - - - - - string - - - - - - - value - - - src - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src - - - - - - string - - - - - - - true - - - ROW__ID - - - src - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - true - - - - #### A masked pattern was here #### - - - tmap:src - - - - - - - #### A masked pattern was here #### - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - true - - - - - - -1 - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - NONE - - - - true - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_7 - - - - - - - - - - - - - key - - - - - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - - - - _col1 - - - VALUE._col1 - - - - - - - - _col0 - - - VALUE._col0 - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_4 - - - - - - - - - - - - - _col0 - - - tmap - - - - - - 
string - - - - - - - _col1 - - - tmap - - - - - - string - - - - - - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/input5.q.xml b/ql/src/test/results/compiler/plan/input5.q.xml index a211641..e69de29 100644 --- a/ql/src/test/results/compiler/plan/input5.q.xml +++ b/ql/src/test/results/compiler/plan/input5.q.xml @@ -1,1552 +0,0 @@ - -#### A masked pattern was here #### - - - - - - - - - - - Stage-2 - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - Stage-0 - - - - - - - - - - - - - - - - HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME - - - HIVE_DEFAULT_LIST_BUCKETING_KEY - - - - - - - - - - - - - - - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - - Stage-1 - - - - - true - - - - - tmap:src_thrift - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - - - - name - default.src_thrift - - - columns.types - - - - serialization.ddl - struct src_thrift { } - - - columns - - - - serialization.format - org.apache.thrift.protocol.TBinaryProtocol - - - columns.comments - - - - serialization.class - org.apache.hadoop.hive.serde2.thrift.test.Complex - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns.types - - - - location - #### A masked pattern was here #### - - - columns - - - - COLUMN_STATS_ACCURATE - true - - - serialization.format - org.apache.thrift.protocol.TBinaryProtocol - - - numRows - 11 - - - numFiles - 1 - - - serialization.ddl - struct src_thrift { } - - - transient_lastDdlTime - #### A masked pattern was here #### - - - rawDataSize - 0 - - - columns.comments - - - - totalSize - 3070 - - - bucket_count - -1 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - serialization.lib - org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer - - - serialization.class - org.apache.hadoop.hive.serde2.thrift.test.Complex - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - name - default.src_thrift - - - - - - - - - - - tmap:src_thrift - - - - - - - - - - - - - - VALUE._col1 - - - _col1 - - - - - string - - - - - - - VALUE._col0 - - - _col0 - - - - - - - - KEY.reducesinkkey0 - - - _col0 - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - 
org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col0 - - - _col1 - - - - - - - - - _col0 - - - - - - - - - - - - - UNSET - - - - - -1 - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string,string - - - escape.delim - \ - - - - - - - - - RS_3 - - - - - - - - - - - - - tkey - - - VALUE._col0 - - - tmap - - - - - - string - - - - - - - tvalue - - - VALUE._col1 - - - tmap - - - - - - string - - - - - - - - - - - - - - - - org.apache.hadoop.hive.ql.exec.TextRecordReader - - - org.apache.hadoop.hive.ql.exec.TextRecordWriter - - - org.apache.hadoop.hive.ql.exec.TextRecordReader - - - /bin/cat - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - field.delim - 9 - - - columns - KEY - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 9 - - - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - field.delim - 9 - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.DelimitedJSONSerDe - - - serialization.format - 9 - - - columns.types - array<int>,array<struct<myint:int,mystring:string,underscore_int:int>> - - - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - field.delim - 9 - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 9 - - - columns.types - string,string - - - - - - - - - SCR_2 - - - - - - - - - - - - - tkey - - - _col0 - - - - - - string - - - - - - - tvalue - - - _col1 - - - - - - string - - - - - - - - - - - - - _col1 - - - lintstring - - - src_thrift - - - - - - - - - myint - - - mystring - - - underscore_int - - - - - - - - - int - - - - - - - - - - - - - - - - - - - _col0 - - - lint - - - src_thrift - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_1 - - - - - - - - - - - - - _col0 - - - - - - array<int> - - - - - - - _col1 - - - - - - array<struct<myint:int,mystring:string,underscore_int:int>> - - - - - - - - - - - - - src_thrift - - - - - 2 - - - 4 - - - - - - - lint - - - lintstring - - - - - - - - - - TS_0 - - - - - lint - - - lintstring - - - - - - - - - - aint - - - src_thrift - - - - - - int - - - - - - - astring - - - src_thrift - - - - - - string - - - - - - - lint - - - src_thrift - - - - - - array<int> - - - - - - - lstring - - - src_thrift - - - - - - - - - - array<string> - - - - - - - lintstring - - - src_thrift - - - - - - array<struct<myint:int,mystring:string,underscore_int:int>> - - - - - - - mstringstring - - - src_thrift - - - - - - - - - - - - - map<string,string> - - - - - - - attributes - - - src_thrift - - - - - - - - - - - - - - - - - - - - - - - - - - - bigint - - - - - - - - - - double - - - - - - - boolean - - - - - - - - - - - - - - - - - - - - - map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>>>> - - - - - - - unionfield1 - - - src_thrift - - - - - - 
uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>> - - - - - - - unionfield2 - - - src_thrift - - - - - - uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>> - - - - - - - unionfield3 - - - src_thrift - - - - - - uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>> - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src_thrift - - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src_thrift - - - - - - string - - - - - - - true - - - ROW__ID - - - src_thrift - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - true - - - - #### A masked pattern was here #### - - - tmap:src_thrift - - - - - - - #### A masked pattern was here #### - - - src_thrift - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - - - - columns.types - - - - location - #### A masked pattern was here #### - - - columns - - - - COLUMN_STATS_ACCURATE - true - - - serialization.format - org.apache.thrift.protocol.TBinaryProtocol - - - numRows - 11 - - - numFiles - 1 - - - serialization.ddl - struct src_thrift { } - - - transient_lastDdlTime - #### A masked pattern was here #### - - - rawDataSize - 0 - - - columns.comments - - - - totalSize - 3070 - - - bucket_count - -1 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - serialization.lib - org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer - - - serialization.class - org.apache.hadoop.hive.serde2.thrift.test.Complex - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - name - default.src_thrift - - - - - - - - - - - - - - true - - - - - - -1 - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - NONE - - - - true - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_6 - - - - - - - - - - - - - key - - - - - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - - - - _col1 - - - VALUE._col1 - - - - - - - - _col0 - - - VALUE._col0 - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_4 - - - - - - - - - - - - - _col0 - - - tmap - - - - - - string - - - - - - - _col1 - - - tmap - - - - - - string - - - - - - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/input6.q.xml b/ql/src/test/results/compiler/plan/input6.q.xml index 5a459b6..e69de29 100644 --- a/ql/src/test/results/compiler/plan/input6.q.xml +++ b/ql/src/test/results/compiler/plan/input6.q.xml @@ -1,1166 +0,0 @@ - -#### A masked pattern was here #### - - - - - - - Stage-7 - - - - - - - - - - - - - - - Stage-2 - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - Stage-0 - - - - - - - - - - - - - - - - - Stage-3 - - - - - - - #### A masked pattern was here #### - - - - - - - - - #### A masked pattern was here #### - - - - - NONE - - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - 
file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - - - - FS_6 - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - TS_5 - - - - - - - - - - - - org.apache.hadoop.hive.ql.io.CombineHiveInputFormat - - - true - - - - #### A masked pattern was here #### - - - #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - -ext-10002 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - - - - - - - - - - - - - - - - - - Stage-6 - - - - - - - - - - - - - - Stage-5 - - - - - - - - - - - - - - true - - - - - - - - - - - - - - - - - HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME - - - HIVE_DEFAULT_LIST_BUCKETING_KEY - - - - - - - - - - - - - - - - - true - - - - - - - - - - - - - - Stage-4 - - - - - - - - - - - - - - - - - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Stage-1 - - - - - true - - - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src1 - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 191 - - - columns.comments - defaultdefault - - - numRows - 25 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 216 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was 
here #### - - - - - - - - - - - src1 - - - - - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - - - true - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - - - - - _col1 - - - value - - - src1 - - - - - - - - _col0 - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_2 - - - - - - - - - - - - - key - - - _col0 - - - src1 - - - - - - string - - - - - - - _col1 - - - src1 - - - - - - string - - - - - - - - - - - - - - - - - - - key - - - src1 - - - - - - - - - - - - - - - boolean - - - - - - - - - FIL_4 - - - - - - - - - - - - - key - - - key - - - src1 - - - - - - string - - - - - - - value - - - src1 - - - - - - string - - - - - - - - - - - - - src1 - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_0 - - - - - key - - - value - - - - - - - - - - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src1 - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src1 - - - - - - string - - - - - - - true - - - ROW__ID - - - src1 - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - int - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - true - - - - #### A masked pattern was here #### - - - src1 - - - - - - - #### A masked pattern was here #### - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src1 - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 191 - - - columns.comments - defaultdefault - - - numRows - 25 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 216 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/input7.q.xml b/ql/src/test/results/compiler/plan/input7.q.xml index 676a72e..e69de29 100644 --- a/ql/src/test/results/compiler/plan/input7.q.xml +++ b/ql/src/test/results/compiler/plan/input7.q.xml @@ -1,1095 +0,0 @@ - -#### A masked pattern was here #### - - - - - - - Stage-7 - - - - - - - - - - - - - - - Stage-2 - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - Stage-0 - - - - - - - - - - - - - - - - - Stage-3 - - - - - - - #### A masked pattern was here #### - - - - - - - - - #### A masked pattern was here #### - - - - - NONE - - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - - - - FS_4 - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - TS_3 - - - - - - - - - - - - org.apache.hadoop.hive.ql.io.CombineHiveInputFormat - - - true - - - - #### A masked pattern was here #### - - - #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - -ext-10002 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - - - - - - - - - - - - - - - - - - Stage-6 - - - - - - - - - - - - - - Stage-5 - - - - - - - - - - - - - - true - - - - - - - - - - - - - - - - - HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME - - - HIVE_DEFAULT_LIST_BUCKETING_KEY - - - - - - - - - - - - - - - - - true - - - - - - - - - - - - - - Stage-4 - - - - - - - - - - - - - - - - - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Stage-1 - - - - - true - - - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src1 - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 191 - - - columns.comments - defaultdefault - - - numRows - 25 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 216 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - src1 - - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - - - true - - - - - - 1 - 
- - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_2 - - - - - - - - - - - - - - - - _col1 - - - key - - - src1 - - - - - - - - _col0 - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_1 - - - - - - - - - - - - - _col0 - - - - - void - - - - - void - - - - - - - _col1 - - - src1 - - - - - - string - - - - - - - - - - - - - src1 - - - - - 0 - - - - - - - key - - - - - - - - - - TS_0 - - - - - key - - - - - - - - - - key - - - src1 - - - - - - string - - - - - - - value - - - src1 - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src1 - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src1 - - - - - - string - - - - - - - true - - - ROW__ID - - - src1 - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - int - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - true - - - - #### A masked pattern was here #### - - - src1 - - - - - - - #### A masked pattern was here #### - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src1 - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 191 - - - columns.comments - defaultdefault - - - numRows - 25 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 216 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/input8.q.xml b/ql/src/test/results/compiler/plan/input8.q.xml index e9ca940..e69de29 100644 --- a/ql/src/test/results/compiler/plan/input8.q.xml +++ b/ql/src/test/results/compiler/plan/input8.q.xml @@ -1,772 +0,0 @@ - -#### A masked pattern was here #### - - - Stage-1 - - - - - true - - - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src1 - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 191 - - - columns.comments - defaultdefault - - - numRows - 25 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - 
org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 216 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - src1 - - - - - - - - - - - - #### A masked pattern was here #### - - - - - NONE - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - double:double:double - - - escape.delim - \ - - - - - - - 1 - - - - - FS_2 - - - - - - - - - - - - - _col0 - - - - - - - - double - - - - - double - - - - - - - _col1 - - - - - - - - - double - - - - - - - _col2 - - - - - - - - - double - - - - - - - - - - - - - _col2 - - - - - - - - - - - - - - - false - - - - - - - - - - _col1 - - - - - - - key - - - src1 - - - - - string - - - - - - - - - - - - - - false - - - - - - - - - - _col0 - - - - - - - - - int - - - - - 4 - - - - - - - - - - - - false - - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - _col2 - - - - - - - SEL_1 - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - double - - - - - - - _c1 - - - _col1 - - - - - - double - - - - - - - _c2 - - - _col2 - - - - - - double - - - - - - - - - - - - - src1 - - - - - 0 - - - - - - - key - - - - - - - - - - TS_0 - - - - - key - - - - - - - - - - key - - - src1 - - - - - - string - - - - - - - value - - - src1 - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src1 - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src1 - - - - - - string - - - - - - - true - - - ROW__ID - - - src1 - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - - #### A masked pattern was here #### - - - src1 - - - - - - - #### A masked pattern was here #### - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src1 - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 191 - - - columns.comments - defaultdefault - - - numRows - 25 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 216 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/input9.q.xml b/ql/src/test/results/compiler/plan/input9.q.xml index 3b0c93b..e69de29 100644 --- a/ql/src/test/results/compiler/plan/input9.q.xml +++ b/ql/src/test/results/compiler/plan/input9.q.xml @@ -1,1146 +0,0 @@ - -#### A masked pattern was here #### - - - - - - - Stage-7 - - - - - - - - - - - - - - - Stage-2 - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - 
- - - - - Stage-0 - - - - - - - - - - - - - - - - - Stage-3 - - - - - - - #### A masked pattern was here #### - - - - - - - - - #### A masked pattern was here #### - - - - - NONE - - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - - - - FS_6 - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - TS_5 - - - - - - - - - - - - org.apache.hadoop.hive.ql.io.CombineHiveInputFormat - - - true - - - - #### A masked pattern was here #### - - - #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - -ext-10002 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - - - - - - - - - - - - - - - - - - Stage-6 - - - - - - - - - - - - - - Stage-5 - - - - - - - - - - - - - - true - - - - - - - - - - - - - - - - - HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME - - - HIVE_DEFAULT_LIST_BUCKETING_KEY - - - - - - - - - - - - - - - - - true - - - - - - - - - - - - - - Stage-4 - - - - - - - - - - - - - - - - - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Stage-1 - - - - - true - - - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src1 - - - numFiles - 1 - - - 
columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 191 - - - columns.comments - defaultdefault - - - numRows - 25 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 216 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - src1 - - - - - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - - - true - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - - - - - _col1 - - - key - - - src1 - - - - - - - - _col0 - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_2 - - - - - - - - - - - - - _col0 - - - - - void - - - - - void - - - - - - - _col1 - - - src1 - - - - - - string - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - boolean - - - - - - - - - FIL_4 - - - - - - - - - - - - - key - - - src1 - - - - - - string - - - - - - - - - - - - - src1 - - - - - 0 - - - - - - - key - - - - - - - - - - TS_0 - - - - - key - - - - - - - - - - - - - value - - - src1 - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src1 - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src1 - - - - - - string - - - - - - - true - - - ROW__ID - - - src1 - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - int - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - true - - - - #### A masked pattern was here #### - - - src1 - - - - - - - #### A masked pattern was here #### - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src1 - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 191 - - - columns.comments - defaultdefault - - - numRows - 25 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 216 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/input_part1.q.xml b/ql/src/test/results/compiler/plan/input_part1.q.xml index 3640102..e69de29 100644 --- a/ql/src/test/results/compiler/plan/input_part1.q.xml +++ b/ql/src/test/results/compiler/plan/input_part1.q.xml @@ -1,946 +0,0 @@ - -#### A masked pattern was here #### - - - Stage-1 - - - - - true - - - - - srcpart - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - ds - 2008-04-08 - - - hr - 12 - - - - - - - columns.types - string:string - - - location - #### A masked pattern was here #### - - - columns - key,value - - - partition_columns.types - string:string - - - 
COLUMN_STATS_ACCURATE - true - - - serialization.format - 1 - - - numRows - 500 - - - numFiles - 1 - - - serialization.ddl - struct srcpart { string key, string value} - - - transient_lastDdlTime - #### A masked pattern was here #### - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - totalSize - 5812 - - - bucket_count - -1 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - partition_columns - ds/hr - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - name - default.srcpart - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.srcpart - - - columns.types - string:string - - - serialization.ddl - struct srcpart { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - partition_columns.types - string:string - - - columns.comments - defaultdefault - - - partition_columns - ds/hr - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - srcpart - - - - - - - - - - - - - - - #### A masked pattern was here #### - - - - - NONE - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2,_col3 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - string:string:string:string - - - escape.delim - \ - - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - - _col0 - - - - - - - - string - - - - - string - - - - - - - _col1 - - - - - - - - - string - - - - - - - _col2 - - - - - - - - - string - - - - - - - _col3 - - - - - - - - - string - - - - - - - - - - - - - _col3 - - - ds - - - - - - 2008-04-08 - - - - - _col2 - - - hr - - - - - - 12 - - - - - _col1 - - - value - - - srcpart - - - - - - - - _col0 - - - key - - - srcpart - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - _col2 - - - _col3 - - - - - - - SEL_2 - - - - - - - - - - - - - key - - - _col0 - - - srcpart - - - - - - string - - - - - - - value - - - _col1 - - - srcpart - - - - - - string - - - - - - - hr - - - _col2 - - - srcpart - - - - - - string - - - - - - - ds - - - _col3 - - - srcpart - - - - - - string - - - - - - - - - - - - - - - - - - - key - - - srcpart - - - - - - - - - - - - int - - - - - 100 - - - - - - - - - - - - boolean - - - - - - - - - FIL_4 - - - - - - - - - - - - - key - - - srcpart - - - - - - string - - - - - - - value - - - srcpart - - - - - - string - - - - - - - - - - - - - srcpart - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_0 - - - - - key - - - value - - - - - - - - - - - - - - - - ds - - - ds - - - srcpart - - - - - - string - - - - - - - hr - - - hr - - - srcpart - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - srcpart - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - srcpart - - - - - - string - - - - - - 
- true - - - ROW__ID - - - srcpart - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - - #### A masked pattern was here #### - - - srcpart - - - - - - - #### A masked pattern was here #### - - - hr=12 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - ds - 2008-04-08 - - - hr - 12 - - - - - - - columns.types - string:string - - - location - #### A masked pattern was here #### - - - columns - key,value - - - partition_columns.types - string:string - - - COLUMN_STATS_ACCURATE - true - - - serialization.format - 1 - - - numRows - 500 - - - numFiles - 1 - - - serialization.ddl - struct srcpart { string key, string value} - - - transient_lastDdlTime - #### A masked pattern was here #### - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - totalSize - 5812 - - - bucket_count - -1 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - partition_columns - ds/hr - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - name - default.srcpart - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml b/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml index f9a8c6a..e69de29 100644 --- a/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml +++ b/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml @@ -1,1109 +0,0 @@ - -#### A masked pattern was here #### - - - - - - - Stage-7 - - - - - - - - - - - - - - - Stage-2 - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - Stage-0 - - - - - - - - - - - - - - - - - Stage-3 - - - - - - - #### A masked pattern was here #### - - - - - - - - - #### A masked pattern was here #### - - - - - NONE - - - - 1 - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - name - default.dest4_sequencefile - - - columns.types - string:string - - - serialization.ddl - struct dest4_sequencefile { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - location - #### A masked pattern was here #### - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - 1 - - - - - FS_4 - - - - - - - - - - - - - key - - - - - - - - string - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - TS_3 - - - - - - - - - - - - org.apache.hadoop.hive.ql.io.CombineHiveInputFormat - - - true - - - - #### A masked pattern was here #### - - - #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - -ext-10002 - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - name - default.dest4_sequencefile - - - columns.types - string:string - - - serialization.ddl - struct dest4_sequencefile { string key, string value} - - - serialization.format 
- 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - - - - - - - - - - - - - - - - - - - - - Stage-6 - - - - - - - - - - - - - - Stage-5 - - - - - - - - - - - - - - true - - - - - - - - - - - - - - - - - HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME - - - HIVE_DEFAULT_LIST_BUCKETING_KEY - - - - - - - - - - - - - - - - - true - - - - - - - - - - - - - - Stage-4 - - - - - - - - - - - - - - - - - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Stage-1 - - - - - true - - - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - src - - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - - - true - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_2 - - - - - - - - - - - - - - - - _col1 - - - value - - - src - - - - - - - - _col0 - - - key - - - src - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_1 - - - - - - - - - - - - - _col0 - - - src - - - - - - string - - - - - - - _col1 - - - src - - - - - - string - - - - - - - - - - - - - src - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_0 - - - - - key - - - value - - - - - - - - - - key - - - src - - - - - - string - - - - - - - value - - - src - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src - - - - - - string - - - - - - - true - - - ROW__ID - - - src - - - - - - - transactionid - - - 
bucketid - - - rowid - - - - - - - - - - - - int - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - true - - - - #### A masked pattern was here #### - - - src - - - - - - - #### A masked pattern was here #### - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/input_testxpath.q.xml b/ql/src/test/results/compiler/plan/input_testxpath.q.xml index 1848f95..e69de29 100644 --- a/ql/src/test/results/compiler/plan/input_testxpath.q.xml +++ b/ql/src/test/results/compiler/plan/input_testxpath.q.xml @@ -1,1062 +0,0 @@ - -#### A masked pattern was here #### - - - Stage-1 - - - - - true - - - - - src_thrift - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - - - - name - default.src_thrift - - - columns.types - - - - serialization.ddl - struct src_thrift { } - - - columns - - - - serialization.format - org.apache.thrift.protocol.TBinaryProtocol - - - columns.comments - - - - serialization.class - org.apache.hadoop.hive.serde2.thrift.test.Complex - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns.types - - - - location - #### A masked pattern was here #### - - - columns - - - - COLUMN_STATS_ACCURATE - true - - - serialization.format - org.apache.thrift.protocol.TBinaryProtocol - - - numRows - 11 - - - numFiles - 1 - - - serialization.ddl - struct src_thrift { } - - - transient_lastDdlTime - #### A masked pattern was here #### - - - rawDataSize - 0 - - - columns.comments - - - - totalSize - 3070 - - - bucket_count - -1 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - serialization.lib - org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer - - - serialization.class - org.apache.hadoop.hive.serde2.thrift.test.Complex - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - name - default.src_thrift - - - - - - - - - - - src_thrift - - - - - - - - - - - - #### A masked pattern was here #### - - - - - NONE - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - 
_col0,_col1,_col2 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - int:string:string - - - escape.delim - \ - - - - - - - 1 - - - - - FS_2 - - - - - - - - - - - - - _col0 - - - - - - - - int - - - - - int - - - - - - - _col1 - - - - - - - - string - - - - - string - - - - - - - _col2 - - - - - - - - - string - - - - - - - - - - - - - _col2 - - - - - - - mstringstring - - - src_thrift - - - - - - - - - - - - - - - - - - - - key_2 - - - - - - - - - - - - - - - _col1 - - - - - - - - - lintstring - - - src_thrift - - - - - - - - - myint - - - mystring - - - underscore_int - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 0 - - - - - - - - - - - - - - - mystring - - - false - - - - - - - - _col0 - - - - - - - lint - - - src_thrift - - - - - - - - - - - - - - - - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - _col2 - - - - - - - SEL_1 - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - int - - - - - - - mystring - - - _col1 - - - - - - string - - - - - - - _c2 - - - _col2 - - - - - - string - - - - - - - - - - - - - src_thrift - - - - - 2 - - - 4 - - - 5 - - - - - - - lint - - - lintstring - - - mstringstring - - - - - - - - - - TS_0 - - - - - lint - - - lintstring - - - mstringstring - - - - - - - - - - aint - - - src_thrift - - - - - - int - - - - - - - astring - - - src_thrift - - - - - - string - - - - - - - lint - - - src_thrift - - - - - - array<int> - - - - - - - lstring - - - src_thrift - - - - - - - - - - array<string> - - - - - - - lintstring - - - src_thrift - - - - - - array<struct<myint:int,mystring:string,underscore_int:int>> - - - - - - - mstringstring - - - src_thrift - - - - - - map<string,string> - - - - - - - attributes - - - src_thrift - - - - - - - - - - - - - - - - - - - - - - - - - - - bigint - - - - - - - - - - double - - - - - - - boolean - - - - - - - - - - - - - - - - - - - - - map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>>>> - - - - - - - unionfield1 - - - src_thrift - - - - - - uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>> - - - - - - - unionfield2 - - - src_thrift - - - - - - uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>> - - - - - - - unionfield3 - - - src_thrift - - - - - - uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>> - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src_thrift - - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src_thrift - - - - - - string - - - - - - - true - - - ROW__ID - - - src_thrift - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - - #### A masked pattern was here #### - - - src_thrift - - - - - - - #### A masked pattern was here #### - - - src_thrift - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - - - - columns.types - - - - location - #### A masked pattern was here #### - - - columns - - - - COLUMN_STATS_ACCURATE - true - - - serialization.format - org.apache.thrift.protocol.TBinaryProtocol - - - numRows - 11 - - - numFiles - 1 - - - serialization.ddl - struct src_thrift { } - - - transient_lastDdlTime - #### A masked pattern was here #### - - - rawDataSize - 0 - - - columns.comments - - - - totalSize - 3070 - - - 
bucket_count - -1 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - serialization.lib - org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer - - - serialization.class - org.apache.hadoop.hive.serde2.thrift.test.Complex - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - name - default.src_thrift - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/input_testxpath2.q.xml b/ql/src/test/results/compiler/plan/input_testxpath2.q.xml index d2fa662..e69de29 100644 --- a/ql/src/test/results/compiler/plan/input_testxpath2.q.xml +++ b/ql/src/test/results/compiler/plan/input_testxpath2.q.xml @@ -1,1134 +0,0 @@ - -#### A masked pattern was here #### - - - Stage-1 - - - - - true - - - - - src_thrift - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - - - - name - default.src_thrift - - - columns.types - - - - serialization.ddl - struct src_thrift { } - - - columns - - - - serialization.format - org.apache.thrift.protocol.TBinaryProtocol - - - columns.comments - - - - serialization.class - org.apache.hadoop.hive.serde2.thrift.test.Complex - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns.types - - - - location - #### A masked pattern was here #### - - - columns - - - - COLUMN_STATS_ACCURATE - true - - - serialization.format - org.apache.thrift.protocol.TBinaryProtocol - - - numRows - 11 - - - numFiles - 1 - - - serialization.ddl - struct src_thrift { } - - - transient_lastDdlTime - #### A masked pattern was here #### - - - rawDataSize - 0 - - - columns.comments - - - - totalSize - 3070 - - - bucket_count - -1 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - serialization.lib - org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer - - - serialization.class - org.apache.hadoop.hive.serde2.thrift.test.Complex - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - name - default.src_thrift - - - - - - - - - - - src_thrift - - - - - - - - - - - - - - - #### A masked pattern was here #### - - - - - NONE - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - int:int:int - - - escape.delim - \ - - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - - _col0 - - - - - - - - int - - - - - int - - - - - - - _col1 - - - - - - - - - int - - - - - - - _col2 - - - - - - - - - int - - - - - - - - - - - - - _col2 - - - - - - - mstringstring - - - src_thrift - - - - - - - string - - - - - - - - - - - - - - - - - - - - - - _col1 - - - - - - - lintstring - - - src_thrift - - - - - - - - - myint - - - mystring - - - underscore_int - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - - - - - lint - - - src_thrift - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - _col2 - - - - - - - SEL_2 - - - - - - - - - - - - - _c0 - - - _col0 - - - - - - int - - - - - - - _c1 - - - _col1 - - - - - - int - - - - - - - _c2 - - - _col2 - - - - - - int - - - - - - - - - - - - - - - - - - - - - - - lint - - - src_thrift - - - - - - - - - - - - - - - boolean - - - - - - - - - - - - - - - - - mstringstring - - - src_thrift - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - FIL_4 - - - - - - - - - - - - - lint - - - src_thrift - - - - - - array<int> - - - - - - - lintstring - - - src_thrift - - - - - - array<struct<myint:int,mystring:string,underscore_int:int>> - - - - - - - mstringstring - - - src_thrift - - - - - - map<string,string> - - - - - - - - - - - - - src_thrift - - - - - 2 - - - 4 - - - 5 - - - - - - - lint - - - lintstring - - - mstringstring - - - - - - - - - - TS_0 - - - - - lint - - - lintstring - - - mstringstring - - - - - - - - - - aint - - - src_thrift - - - - - - int - - - - - - - astring - - - src_thrift - - - - - - string - - - - - - - - - - lstring - - - src_thrift - - - - - - - - - - array<string> - - - - - - - - - - - - - attributes - - - src_thrift - - - - - - - - - - - - - - - - - - - - - - - - - - - bigint - - - - - - - - - - double - - - - - - - - - - - - - - - - - - - - - - - - map<string,map<string,map<string,uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>>>>> - - - - - - - unionfield1 - - - src_thrift - - - - - - uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>> - - - - - - - unionfield2 - - - src_thrift - - - - - - uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>> - - - - - - - unionfield3 - - - src_thrift - - - - - - uniontype<int,bigint,string,double,boolean,array<string>,map<string,string>> - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src_thrift - - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src_thrift - - - - - - string - - - - - - - true - - - ROW__ID - - - src_thrift - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - - #### A masked pattern was here #### - - - src_thrift - - - - - - - #### A masked pattern was here #### - - - src_thrift - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - - - - columns.types - - - - location - #### A masked pattern was here #### - - - columns - - - - COLUMN_STATS_ACCURATE - true - - - serialization.format - org.apache.thrift.protocol.TBinaryProtocol - - - numRows - 11 - - - numFiles - 1 - - - serialization.ddl - struct src_thrift { } - - - transient_lastDdlTime - #### A masked pattern was here #### - - - rawDataSize - 0 - - - columns.comments - - - - totalSize - 3070 - - - bucket_count - -1 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - serialization.lib - org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer - - - serialization.class - org.apache.hadoop.hive.serde2.thrift.test.Complex - - - file.inputformat - org.apache.hadoop.mapred.SequenceFileInputFormat - - - name - default.src_thrift - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/join1.q.xml b/ql/src/test/results/compiler/plan/join1.q.xml index 9a1c202..e69de29 100644 --- a/ql/src/test/results/compiler/plan/join1.q.xml +++ 
b/ql/src/test/results/compiler/plan/join1.q.xml @@ -1,1700 +0,0 @@ - -#### A masked pattern was here #### - - - - - - - - - - - Stage-2 - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - Stage-0 - - - - - - - - - - - - - - - - HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME - - - HIVE_DEFAULT_LIST_BUCKETING_KEY - - - - - - - - - - - - - - - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - - Stage-1 - - - - - true - - - - - src2 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - - - - - - src2 - - - - - - - - - - - VALUE._col0 - - - value - - - src2 - - - - - string - - - - - - - KEY.reducesinkkey0 - - - key - - - src2 - - - - - - - - - - - - - - - - - - - - - - - - 
org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col0 - - - - - - - - - - - - - - - UNSET - - - - - 1 - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string - - - escape.delim - \ - - - - - - - - - RS_5 - - - - - - - - - - - - - VALUE._col0 - - - src2 - - - - - - string - - - - - - - - - - - - - - - - - - - key - - - src2 - - - - - - - - - - - - - - - boolean - - - - - - - - - FIL_9 - - - - - - - - - - - - - key - - - src2 - - - - - - string - - - - - - - value - - - src2 - - - - - - string - - - - - - - - - - - - - src2 - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_0 - - - - - key - - - value - - - - - - - - - - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src2 - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src2 - - - - - - string - - - - - - - true - - - ROW__ID - - - src2 - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - int - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - src1 - - - - - - - - - - - KEY.reducesinkkey0 - - - key - - - src1 - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - - - - escape.delim - \ - - - - - - - - - RS_3 - - - - - - - - - - - - - - - - - - - - - - key - - - src1 - - - - - - - - - - - - - - - - - - - - FIL_10 - - - - - - - - - - - - - key - - - src1 - - - - - - string - - - - - - - - - - - - - src1 - - - - - 0 - - - - - - - key - - - - - - - - - - TS_1 - - - - - key - - - - - - - - - - - - - value - - - src1 - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src1 - - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src1 - - - - - - string - - - - - - - true - - - ROW__ID - - - src1 - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - true - - - - #### A masked pattern was here #### - - - src2 - - - src1 - - - - - - - #### A masked pattern was here #### - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows 
- 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - true - - - - - - true - - - -1 - - - - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - NONE - - - - true - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_8 - - - - - - - - - - - - - key - - - - - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - - - - _col1 - - - _col6 - - - src2 - - - - - - - - _col0 - - - _col0 - - - src1 - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_7 - - - - - - - - - - - - - _col0 - - - src1 - - - - - - string - - - - - - - _col1 - - - src2 - - - - - - string - - - - - - - - - - - - - _col6 - - - VALUE._col0 - - - src2 - - - - - - - - _col0 - - - KEY.reducesinkkey0 - - - src1 - - - - - - - - - - - - - - - - 1 - - - - - - - - - 0 - - - - - - - - 1 - - - - - - - - - - - - - - - 0 - - - - 1 - - - - - - true - - - - - - - - _col0 - - - _col6 - - - - - - - _col8 - 1 - - - _col7 - 1 - - - _col6 - 1 - - - _col5 - 1 - - - _col4 - 0 - - - _col3 - 0 - - - _col2 - 0 - - - _col1 - 0 - - - _col0 - 0 - - - _col9 - 1 - - - - - - - 0 - - - 1 - - - - - - - JOIN_6 - - - - - - - - - - - - - 0 - - - src1 - - - - - 1 - - - src2 - - - - - - - - - - - - _col0 - - - src1 - - - - - - string - - - - - - - _col6 - - - src2 - - - - - - string - - - - - - - - - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/join2.q.xml b/ql/src/test/results/compiler/plan/join2.q.xml index 28a7d8f..e69de29 100644 --- a/ql/src/test/results/compiler/plan/join2.q.xml +++ b/ql/src/test/results/compiler/plan/join2.q.xml @@ -1,2941 +0,0 @@ - -#### A masked pattern was here #### - - - - - - - - - - - - - - - Stage-3 - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - Stage-0 - - - - - - - - - - - - - - - - HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME - - - HIVE_DEFAULT_LIST_BUCKETING_KEY - - - - - - - - - - - - - - - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - - Stage-2 - - - - - - - - - - - - true - - - - - src3 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count 
- -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - $INTNAME - - - - - - - - VALUE._col0 - - - _col0 - - - src1 - - - - - string - - - - - - - KEY.reducesinkkey0 - - - - - - - _col0 - - - src1 - - - - - - - - - - _col5 - - - src2 - - - - - - - - - - - - false - - - - - - - double - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - double - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col0 - - - - - - - - - - - - - - - UNSET - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string - - - escape.delim - \ - - - - - - - - - RS_9 - - - - - - - - - - - - - VALUE._col0 - - - src1 - - - - - - string - - - - - - - - - - - - - - - 0 - - - 1 - - - - - - - _col0 - - - _col5 - - - - - - - TS_19 - - - - - - - - - - - _col0 - - - src1 - - - - - - string - - - - - - - _col5 - - - src2 - - - - - - string - - - - - - - - - - src3 - - - - - - - - - - - VALUE._col1 - - - value - - - src3 - - - - - - - - KEY.reducesinkkey0 - - - - - - - key - - - src3 - - - - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFToDouble - - - UDFToDouble - - - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - double - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col1 - - - - - - - - - - - - - - - - - - 1 - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string - - - escape.delim - \ - - - - - - - - - RS_11 - - - - - - - - - - - - - VALUE._col1 - - - src3 - - - - - - string - - - - - - - - - - - - - - 
- - - - - - - - - key - - - src3 - - - - - - - - - - - - org.apache.hadoop.hive.ql.udf.UDFToDouble - - - UDFToDouble - - - - - - - - - - - - - - - - - boolean - - - - - - - - - FIL_16 - - - - - - - - - - - - - key - - - src3 - - - - - - string - - - - - - - value - - - src3 - - - - - - string - - - - - - - - - - - - - src3 - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_1 - - - - - key - - - value - - - - - - - - - - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src3 - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src3 - - - - - - string - - - - - - - true - - - ROW__ID - - - src3 - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - int - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - true - - - - #### A masked pattern was here #### - - - $INTNAME - - - - - #### A masked pattern was here #### - - - src3 - - - - - - - #### A masked pattern was here #### - - - -mr-10002 - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0,_col5 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string,string - - - escape.delim - \ - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - - - - - - #### A masked pattern was here #### - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - true - - - - - - true - - - -1 - - - - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - NONE - - - - true - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_14 - - - - - - - - - - - - - key - - - - - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - - - - _col1 - - - _col11 - - - src3 - - - - - - - - _col0 - - - _col0 - - - src1 - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_13 - - - - - - - - - - - - - _col0 - - - src1 - - - - - - string - - - - - - - _col1 - - - src3 - - - - - - string - - - - - - - - - - - - - _col11 - - - VALUE._col1 - - - src3 - - - - - - - - _col0 - - - VALUE._col0 - - - src1 - - - - - - - - - - - - - - - - 1 - - - - - - - - - 0 - - - - - - - - 1 - - - - - - - - - - - - - - - 0 - - - - 1 - - - - - - true - - - - - - - - _col0 - - - _col11 - - - - - - - _col8 - 0 - - - _col7 - 0 - - - _col6 - 0 - - - _col5 - 0 - - - _col4 - 0 - - - _col3 - 0 - - - _col2 - 0 - - - _col1 - 0 - - - _col9 - 0 - - - _col13 - 1 - - - _col12 - 1 - - - _col11 - 1 - - - _col10 - 1 - - - _col14 - 1 - - - _col0 - 0 - - - - - - 
- 0 - - - 1 - - - - - - - JOIN_12 - - - - - - - - - - - - - 0 - - - src2 - - - src1 - - - - - 1 - - - src3 - - - - - - - - - - - - _col0 - - - src1 - - - - - - string - - - - - - - _col11 - - - src3 - - - - - - string - - - - - - - - - - - - - - - - - - - - - - - - - - Stage-1 - - - - - - - src2 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - - - - - - src2 - - - - - - - - - - - KEY.reducesinkkey0 - - - key - - - src2 - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - - - - - - - - - - - - - - - 1 - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - - - - escape.delim - \ - - - - - - - - - RS_6 - - - - - - - - - - - - - - - - - - - - - - key - - - src2 - - - - - - - - - - - - - - - - - - - - FIL_15 - - - - - - - - - - - - - key - - - src2 - - - - - - string - - - - - - - - - - - - - src2 - - - - - 0 - - - - - - - key - - - - - - - - - - TS_0 - - - - - key - - - - - - - - - - - - - value - - - src2 - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src2 - - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src2 - - - - - - string - - - - - - - true - - - ROW__ID - - - src2 - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - src1 - - - - - - - - - - - KEY.reducesinkkey0 - - - key - - - src1 - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - -1 - - - 1 - - - -1 - - - - - 
reducesinkkey0 - - - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - - - - escape.delim - \ - - - - - - - - - RS_4 - - - - - - - - - - - - - - - - - - - - - - key - - - src1 - - - - - - - - - - - - - - - - - - - - FIL_17 - - - - - - - - - - - - - key - - - src1 - - - - - - string - - - - - - - - - - - - - src1 - - - - - 0 - - - - - - - key - - - - - - - - - - TS_2 - - - - - key - - - - - - - - - - - - - value - - - src1 - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src1 - - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src1 - - - - - - string - - - - - - - true - - - ROW__ID - - - src1 - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - - #### A masked pattern was here #### - - - src2 - - - src1 - - - - - - - #### A masked pattern was here #### - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - true - - - -1 - - - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - 1 - - - - - - 1 - - - - - FS_18 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - FIL_8 - - - - - - - - - - - - - - - - _col5 - - - KEY.reducesinkkey0 - - - src2 - - - - - - - - _col0 - - - KEY.reducesinkkey0 - - - src1 - - - - - - - - - - - - - - - - 1 - - - - - - - - - 0 - - - - - - - - 1 - - - - - - - - - - - - - - - 0 - - - - 1 - - - - - - true - - - - - - - - _col0 - - - _col5 - - - - - - - _col8 - 1 - - - _col7 - 1 - - - _col6 - 1 - - - _col5 - 1 - - - _col4 - 0 - - - _col3 - 0 - - - _col2 - 0 - - - _col1 - 0 - - - _col0 - 0 - - - _col9 - 1 - - - - - - - 0 - - - 1 - - - - - - - JOIN_7 - - - - - - - - - - - - - 0 - - - src1 - - - - - 1 - - - src2 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/join3.q.xml b/ql/src/test/results/compiler/plan/join3.q.xml index 93914da..e69de29 100644 --- a/ql/src/test/results/compiler/plan/join3.q.xml +++ b/ql/src/test/results/compiler/plan/join3.q.xml @@ -1,2155 +0,0 @@ - -#### A masked pattern was here #### - - - - - - - - - - - Stage-2 - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - Stage-0 - - - - - - - - - - - - - - - - HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME - - - HIVE_DEFAULT_LIST_BUCKETING_KEY - - - - - - - - - - - - - - - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - 
string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - - Stage-1 - - - - - true - - - - - src2 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - src3 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - - - - src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - - - - - - src2 - - - - - - - - - - - KEY.reducesinkkey0 - - - key - - - src2 - - - - 
- string - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - - - - - - - - - - - - UNSET - - - - - 1 - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - - - - escape.delim - \ - - - - - - - - - RS_6 - - - - - - - - - - - - - - - - - - - - - - key - - - src2 - - - - - - - - - - - - - - - boolean - - - - - - - - - FIL_12 - - - - - - - - - - - - - key - - - src2 - - - - - - string - - - - - - - - - - - - - src2 - - - - - 0 - - - - - - - key - - - - - - - - - - TS_0 - - - - - key - - - - - - - - - - - - - value - - - src2 - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src2 - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src2 - - - - - - string - - - - - - - true - - - ROW__ID - - - src2 - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - int - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - src3 - - - - - - - - - - - VALUE._col0 - - - value - - - src3 - - - - - - - - KEY.reducesinkkey0 - - - key - - - src3 - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col0 - - - - - - - - - - - - - - - - - - 2 - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string - - - escape.delim - \ - - - - - - - - - RS_8 - - - - - - - - - - - - - VALUE._col0 - - - src3 - - - - - - string - - - - - - - - - - - - - - - - - - - key - - - src3 - - - - - - - - - - - - - - - - - - - - FIL_13 - - - - - - - - - - - - - key - - - src3 - - - - - - string - - - - - - - value - - - src3 - - - - - - string - - - - - - - - - - - - - src3 - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_1 - - - - - key - - - value - - - - - - - - - - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src3 - - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src3 - - - - - - string - - - - - - - true - - - ROW__ID - - - src3 - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - src1 - - - - - - - - - - - KEY.reducesinkkey0 - - - key - - - src1 - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - 
- - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - - - - escape.delim - \ - - - - - - - - - RS_4 - - - - - - - - - - - - - - - - - - - - - - key - - - src1 - - - - - - - - - - - - - - - - - - - - FIL_14 - - - - - - - - - - - - - key - - - src1 - - - - - - string - - - - - - - - - - - - - src1 - - - - - 0 - - - - - - - key - - - - - - - - - - TS_2 - - - - - key - - - - - - - - - - - - - value - - - src1 - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src1 - - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src1 - - - - - - string - - - - - - - true - - - ROW__ID - - - src1 - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - true - - - - #### A masked pattern was here #### - - - src2 - - - src3 - - - src1 - - - - - - - #### A masked pattern was here #### - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - true - - - - - - true - - - -1 - - - - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - NONE - - - - true - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_11 - - - - - - - - - - - - - key - - - - - - - - - string - - - - - - - value - - - - - - - - - string - - - - - - - - - - - - - _col1 - - - _col11 - - - src3 - - - - - - - - _col0 - - - _col0 - - - src1 - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_10 - - - - - - - - - - - - - _col0 - - - src1 - - - - - - string - - - - - - - _col1 - - - src3 - - - - - - string - - - - - - - - - - - - - _col11 - - - VALUE._col0 - - - src3 - - - - - - - - _col0 - - - KEY.reducesinkkey0 - - - src1 - - - - - - - - - - - - - - - - 1 - - - - - - - 2 - - - - - - - - - 0 - - - - - - - - 1 - - - - 2 - - - - - - - - - - - - - - - 0 - - - - 1 - - - - 2 - - - - - - true - - - - - - - - _col0 - - - _col11 - - - - - - - _col8 - 1 - - - _col7 - 1 - - - _col6 - 1 - - - _col5 - 1 - - - _col4 - 0 - - - _col3 - 0 - - - _col2 - 0 - - - _col1 - 0 - - - _col9 - 1 - - - _col13 - 2 - - - _col12 - 2 - - - _col11 - 2 - - - _col10 - 2 - - - _col14 - 2 - - - _col0 - 0 - - - - - - - 0 - - - 1 - - - 2 - - - - - - - JOIN_9 - - - - - - - - - - - - - - - - 0 - - - src1 - - - - - 1 - - - src2 - - - - - 2 - - - src3 - - - - - - - - - - - - _col0 - - - src1 - - - - - - string - - - - - - - _col11 - - - src3 - - - - - - string - - - - - - - - - - - - - - - - - - - - - - - - - diff --git 
a/ql/src/test/results/compiler/plan/join4.q.xml b/ql/src/test/results/compiler/plan/join4.q.xml index 63fcd64..e69de29 100644 --- a/ql/src/test/results/compiler/plan/join4.q.xml +++ b/ql/src/test/results/compiler/plan/join4.q.xml @@ -1,2075 +0,0 @@ - -#### A masked pattern was here #### - - - Stage-1 - - - - - true - - - - - c:a:src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - c:b:src2 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - - - - - - c:a:src1 - - - - - - - - - - - - - - VALUE._col0 - - - _col1 - - - - - string - - - - - - - KEY.reducesinkkey0 - - - _col0 - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col0 - - - - - - - - - - - - - - - UNSET - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string - - - escape.delim - \ - - - - - - - - - RS_6 - - - - - - - - - - - - - VALUE._col0 - - - a - - - - - - string - - - - - - - - - - - - - _col1 - - - value - - 
- src1 - - - - - - - - _col0 - - - key - - - src1 - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_5 - - - - - - - - - - - - - _col0 - - - - - - string - - - - - - - _col1 - - - - - - string - - - - - - - - - - - - - - - - - - - - - - - key - - - src1 - - - - - - - - - - - - int - - - - - 10 - - - - - - - - - - - - boolean - - - - - - - - - - - - - key - - - src1 - - - - - - - - - - - - - 20 - - - - - - - - - - - - - - - - - - - - - - - - - - - FIL_12 - - - - - - - - - - - - - key - - - src1 - - - - - - string - - - - - - - value - - - src1 - - - - - - string - - - - - - - - - - - - - src1 - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_3 - - - - - key - - - value - - - - - - - - - - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src1 - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src1 - - - - - - string - - - - - - - true - - - ROW__ID - - - src1 - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - c:b:src2 - - - - - - - - - - - - - - VALUE._col0 - - - _col1 - - - - - - - - KEY.reducesinkkey0 - - - _col0 - - - - - - - - - - - - - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - reducesinkkey0 - - - serialization.lib - org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe - - - serialization.sort.order - + - - - columns.types - string - - - - - - - -1 - - - 1 - - - -1 - - - - - reducesinkkey0 - - - - - - - _col0 - - - - - - - - - - - - - - - - - - 1 - - - - - - - - - - - - org.apache.hadoop.mapred.SequenceFileInputFormat - - - org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - - - - - columns - _col0 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - - columns.types - string - - - escape.delim - \ - - - - - - - - - RS_7 - - - - - - - - - - - - - VALUE._col0 - - - b - - - - - - string - - - - - - - - - - - - - _col1 - - - value - - - src2 - - - - - - - - _col0 - - - key - - - src2 - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_2 - - - - - - - - - - - - - _col0 - - - - - - string - - - - - - - _col1 - - - - - - string - - - - - - - - - - - - - - - - - - - - - - - key - - - src2 - - - - - - - - - - - - - 15 - - - - - - - - - - - - - - - - - - - - - key - - - src2 - - - - - - - - - - - - - 25 - - - - - - - - - - - - - - - - - - - - - - - - - - - FIL_13 - - - - - - - - - - - - - key - - - src2 - - - - - - string - - - - - - - value - - - src2 - - - - - - string - - - - - - - - - - - - - src2 - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_0 - - - - - key - - - value - - - - - - - - - - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src2 - - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src2 - - - - - - string - - - - - - - true - - - ROW__ID - - - src2 - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - - #### A masked pattern was here #### - - - c:a:src1 - - - c:b:src2 - - - - - - - #### A masked pattern was here #### - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - 
- - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - true - - - -1 - - - - - - - - - - - - - - #### A masked pattern was here #### - - - - - NONE - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - hive.serialization.extend.nesting.levels - true - - - columns - _col0,_col1,_col2,_col3 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - string:string:string:string - - - escape.delim - \ - - - - - - - 1 - - - - - FS_11 - - - - - - - - - - - - - _col0 - - - - - - - - - string - - - - - - - _col1 - - - - - - - - - string - - - - - - - _col2 - - - - - - - - - string - - - - - - - _col3 - - - - - - - - - string - - - - - - - - - - - - - _col3 - - - _col3 - - - - - - - - _col2 - - - _col2 - - - - - - - - _col1 - - - _col1 - - - - - - - - _col0 - - - _col0 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - _col2 - - - _col3 - - - - - - - SEL_9 - - - - - - - - - - - - - c1 - - - _col0 - - - c - - - - - - string - - - - - - - c2 - - - _col1 - - - c - - - - - - string - - - - - - - c3 - - - _col2 - - - c - - - - - - string - - - - - - - c4 - - - _col3 - - - c - - - - - - string - - - - - - - - - - - - - _col3 - - - VALUE._col0 - - - - - - - - _col2 - - - KEY.reducesinkkey0 - - - - - - - - _col1 - - - VALUE._col0 - - - - - - - - _col0 - - - KEY.reducesinkkey0 - - - - - - - - - - - - - - - - 1 - - - 1 - - - - - - - - - 0 - - - - - - - - - - - 1 - - - - - - - - - - - - - - - - - 1 - - - - - - - - - 0 - - - - 1 - - - - - - - - - - - _col0 - - - _col1 - - - _col2 - - - _col3 - - - - - - - _col3 - 1 - - - _col2 - 1 - - - _col1 - 0 - - - _col0 - 0 - - - - - - - 0 - - - 1 - - - - - - - JOIN_8 - - - - - - - - - - - - - 0 - - - a - - - - - 1 - - - b - - - - - - - - - - - - _col0 - - - - - - string - - - - - - - _col1 - - - - - - string - - - - - - - _col2 - - - - - - string - - - - - - - _col3 - - - - - - string - - - - - - - - - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/join5.q.xml b/ql/src/test/results/compiler/plan/join5.q.xml index 71487a6..e69de29 100644 --- a/ql/src/test/results/compiler/plan/join5.q.xml +++ b/ql/src/test/results/compiler/plan/join5.q.xml @@ -1,2071 +0,0 @@ - -#### A masked pattern was here #### - - - Stage-1 - - - - - true - - - - - c:a:src1 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - 
-[remainder of the preceding removed plan file: src table descriptors, reduce sink and value schemas for aliases src1 and src2, range filters on key (constants 10/20 for src1, 15/25 for src2), virtual columns (BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, ROW__ID), a two-way join operator JOIN_8 emitting columns c1..c4, and a text-format file sink]
diff --git a/ql/src/test/results/compiler/plan/join6.q.xml b/ql/src/test/results/compiler/plan/join6.q.xml
index 3ff2610..e69de29 100644
--- a/ql/src/test/results/compiler/plan/join6.q.xml
+++ b/ql/src/test/results/compiler/plan/join6.q.xml
@@ -1,2078 +0,0 @@
-[2078 removed lines: XML-serialized MapReduce plan for join6.q — table scans of src as src1 and src2, range filters on key (constants 10/20 and 15/25), reduce sinks RS_6/RS_7 keyed on key, a two-way join JOIN_8 producing columns c1..c4, and a text-format file sink FS_11]
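The removed join6 plan is consistent with a query of roughly the following shape. This is only a sketch reconstructed from the aliases (a, b, c) and constants visible in the serialized plan, not the verbatim join6.q test query; the comparison operators and the FULL OUTER join type are assumptions.

    SELECT c.c1, c.c2, c.c3, c.c4
    FROM (
      SELECT a.key AS c1, a.value AS c2, b.key AS c3, b.value AS c4
      FROM (SELECT key, value FROM src WHERE key > 10 AND key < 20) a
      FULL OUTER JOIN
           (SELECT key, value FROM src WHERE key > 15 AND key < 25) b
        ON (a.key = b.key)
    ) c;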
diff --git a/ql/src/test/results/compiler/plan/join7.q.xml b/ql/src/test/results/compiler/plan/join7.q.xml
index 279147a..e69de29 100644
--- a/ql/src/test/results/compiler/plan/join7.q.xml
+++ b/ql/src/test/results/compiler/plan/join7.q.xml
@@ -1,2902 +0,0 @@
-[2902 removed lines: XML-serialized MapReduce plan for join7.q — table scans of src as src1, src2 and src3, range filters on key (constants 10/20, 15/25 and 20/25), three reduce sinks RS_9/RS_10/RS_11, a three-way join JOIN_12 producing columns c1..c6, and a text-format file sink FS_15]
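The removed join7 plan matches a three-way variant of the same query shape. Again a hedged sketch only: the outer-join types and comparison operators are assumptions, and the outer alias cq is illustrative.

    SELECT cq.c1, cq.c2, cq.c3, cq.c4, cq.c5, cq.c6
    FROM (
      SELECT a.key AS c1, a.value AS c2,
             b.key AS c3, b.value AS c4,
             c.key AS c5, c.value AS c6
      FROM (SELECT key, value FROM src WHERE key > 10 AND key < 20) a
      FULL OUTER JOIN (SELECT key, value FROM src WHERE key > 15 AND key < 25) b ON (a.key = b.key)
      LEFT OUTER JOIN (SELECT key, value FROM src WHERE key > 20 AND key < 25) c ON (a.key = c.key)
    ) cq;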
diff --git a/ql/src/test/results/compiler/plan/join8.q.xml b/ql/src/test/results/compiler/plan/join8.q.xml
index c9a2de4..e69de29 100644
--- a/ql/src/test/results/compiler/plan/join8.q.xml
+++ b/ql/src/test/results/compiler/plan/join8.q.xml
@@ -1,2216 +0,0 @@
-[2216 removed lines: XML-serialized MapReduce plan for join8.q — the same filtered two-way join shape as join6.q, with an extra predicate on key in each map-side filter (FIL_14/FIL_15) and a residual filter FIL_13 on the join output column _col2 ahead of the final select SEL_9 and file sink FS_12]
diff --git a/ql/src/test/results/compiler/plan/sample1.q.xml b/ql/src/test/results/compiler/plan/sample1.q.xml
index 0ab3b87..e69de29 100644
--- a/ql/src/test/results/compiler/plan/sample1.q.xml
+++ b/ql/src/test/results/compiler/plan/sample1.q.xml
@@ -1,1039 +0,0 @@
-[1039 removed lines: XML-serialized map-only plan for sample1.q — a scan of srcpart partition ds=2008-04-08/hr=11, a sampling filter FIL_1 built from UDFRand and UDFOPBitAnd with the constants 2147483647, 1 and 0, and a select SEL_3 of key, value, ds, hr into a text-format file sink FS_4]
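The removed sample1 plan corresponds to rand()-based sampling of a single srcpart partition. A hedged sketch of that query shape (the BUCKET 1 OUT OF 1 clause is inferred from the sampling constants in the plan, not read from the test file):

    SELECT s.key, s.value, s.ds, s.hr
    FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s
    WHERE s.ds = '2008-04-08' AND s.hr = '11';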
diff --git a/ql/src/test/results/compiler/plan/sample2.q.xml b/ql/src/test/results/compiler/plan/sample2.q.xml
index 1b3a948..e69de29 100644
--- a/ql/src/test/results/compiler/plan/sample2.q.xml
+++ b/ql/src/test/results/compiler/plan/sample2.q.xml
@@ -1,1277 +0,0 @@
-[1277 removed lines: XML-serialized multi-stage plan for sample2.q (Stage-7 through Stage-0) — a scan of the two-bucket table srcbucket pruned to srcbucket0.txt, a bucket-sampling filter FIL_1 on key using the constants 2147483647, 2 and 0, a select of key and value, and move/merge stages loading default.dest1]
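The removed sample2 plan corresponds to bucket sampling of the two-bucket srcbucket table into dest1; because the sample column matches the bucketing column, only srcbucket0.txt is read. A hedged sketch of that query shape (the ON key clause and the INSERT target are taken from the bucket metadata and the dest1 descriptor in the plan, not from the test file itself):

    INSERT OVERWRITE TABLE dest1
    SELECT s.key, s.value
    FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 ON key) s;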
diff --git a/ql/src/test/results/compiler/plan/sample3.q.xml b/ql/src/test/results/compiler/plan/sample3.q.xml
index 28e3208..e69de29 100644
--- a/ql/src/test/results/compiler/plan/sample3.q.xml
+++ b/ql/src/test/results/compiler/plan/sample3.q.xml
@@ -1,1287 +0,0 @@
-[1287 removed lines: XML-serialized multi-stage plan for sample3.q — the same srcbucket-to-dest1 shape as sample2.q, but with a sampling filter computed over both key and value and no bucket pruning (the path alias is the whole srcbucket table)]
diff --git a/ql/src/test/results/compiler/plan/sample4.q.xml b/ql/src/test/results/compiler/plan/sample4.q.xml
index 1b3a948..e69de29 100644
--- a/ql/src/test/results/compiler/plan/sample4.q.xml
+++ b/ql/src/test/results/compiler/plan/sample4.q.xml
@@ -1,1277 +0,0 @@
-[1277 removed lines, identical to those of sample2.q.xml (same source blob 1b3a948); the trailing portion of the removed plan follows]
- - - - key - - - value - - - - - - - TS_4 - - - - - - - - - - - - org.apache.hadoop.hive.ql.io.CombineHiveInputFormat - - - true - - - - #### A masked pattern was here #### - - - #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - -ext-10002 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.dest1 - - - columns.types - string:string - - - serialization.ddl - struct dest1 { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - - - - - - - - - - - - - - - - - - Stage-6 - - - - - - - - - - - - - - Stage-5 - - - - - - - - - - - - - - true - - - - - - - - - - - - - - - - - HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME - - - HIVE_DEFAULT_LIST_BUCKETING_KEY - - - - - - - - - - - - - - - - - true - - - - - - - - - - - - - - Stage-4 - - - - - - - - - - - - - - - - - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Stage-1 - - - - - true - - - - - s - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.srcbucket - - - columns.types - int:string - - - bucket_field_name - key - - - serialization.ddl - struct srcbucket { i32 key, string value} - - - columns - key,value - - - serialization.format - 1 - - - columns.comments - - - - bucket_count - 2 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - columns.types - int:string - - - location - #### A masked pattern was here #### - - - columns - key,value - - - COLUMN_STATS_ACCURATE - true - - - serialization.format - 1 - - - numRows - 1000 - - - numFiles - 2 - - - serialization.ddl - struct srcbucket { i32 key, string value} - - - transient_lastDdlTime - #### A masked pattern was here #### - - - rawDataSize - 10603 - - - columns.comments - - - - totalSize - 11603 - - - bucket_count - 2 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - bucket_field_name - key - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - name - default.srcbucket - - - - - - - - - - - s - - - - - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - - - true - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_3 - - - - - - - - - - - - - - - - _col1 - - - value - - - s - - - - - - - - _col0 - - - key - - - s - - - - - int - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - true - - 
diff --git a/ql/src/test/results/compiler/plan/sample5.q.xml b/ql/src/test/results/compiler/plan/sample5.q.xml
index 3cfd796..e69de29 100644
--- a/ql/src/test/results/compiler/plan/sample5.q.xml
+++ b/ql/src/test/results/compiler/plan/sample5.q.xml
@@ -1,1274 +0,0 @@
[file emptied: 1274 lines of serialized query-plan XML deleted — same stage graph with a TS_0 → FIL_1 (UDFOPBitAnd over key with constants 2147483647 and 5) → SEL_2 → FS_3 plan over default.srcbucket into default.dest1, plus a TS_4 → FS_5 move stage; all paths and timestamps masked]
diff --git a/ql/src/test/results/compiler/plan/sample6.q.xml b/ql/src/test/results/compiler/plan/sample6.q.xml
index a4fee5d..e69de29 100644
--- a/ql/src/test/results/compiler/plan/sample6.q.xml
+++ b/ql/src/test/results/compiler/plan/sample6.q.xml
@@ -1,1277 +0,0 @@
[file emptied: 1277 lines of serialized query-plan XML deleted — TS_0 → FIL_1 (UDFOPBitAnd over key with constants 2147483647 and 4) → SEL_2 → FS_3 plan over default.srcbucket (srcbucket0.txt) into default.dest1, plus a TS_4 → FS_5 move stage; all paths and timestamps masked]
diff --git a/ql/src/test/results/compiler/plan/sample7.q.xml b/ql/src/test/results/compiler/plan/sample7.q.xml
index a5fbe9d..e69de29 100644
--- a/ql/src/test/results/compiler/plan/sample7.q.xml
+++ b/ql/src/test/results/compiler/plan/sample7.q.xml
@@ -1,1322 +0,0 @@
[file emptied: 1322 lines of serialized query-plan XML deleted — TS_0 → FIL_5 (UDFOPBitAnd over key with constants 2147483647 and 4, combined with a comparison of key against 100) → SEL_3 → FS_4 plan over default.srcbucket (srcbucket0.txt) into default.dest1, plus a TS_6 → FS_7 move stage; all paths and timestamps masked]
diff --git a/ql/src/test/results/compiler/plan/subq.q.xml b/ql/src/test/results/compiler/plan/subq.q.xml
index 385680f..e69de29 100644
--- a/ql/src/test/results/compiler/plan/subq.q.xml
+++ b/ql/src/test/results/compiler/plan/subq.q.xml
@@ -1,1059 +0,0 @@
[file emptied: 1059 lines of serialized query-plan XML deleted — subquery plan over default.src (alias unioninput:src) with TS_0 → FIL_5 (key compared against 100) → SEL_2 → FS_4, followed by a TS_6 → FS_7 move stage writing _col0,_col1; all paths and timestamps masked]
diff --git a/ql/src/test/results/compiler/plan/udf1.q.xml b/ql/src/test/results/compiler/plan/udf1.q.xml
index a6c45b2..e69de29 100644
--- a/ql/src/test/results/compiler/plan/udf1.q.xml
+++ b/ql/src/test/results/compiler/plan/udf1.q.xml
@@ -1,1517 +0,0 @@
[file emptied: 1517 lines of serialized query-plan XML deleted — TS_0 → FIL_4 (key compared against 86) → SEL_2 → FS_3 plan over default.src evaluating constant LIKE/RLIKE and regexp_replace expressions into columns _col0 through _col16; all paths and timestamps masked]
diff --git a/ql/src/test/results/compiler/plan/udf4.q.xml b/ql/src/test/results/compiler/plan/udf4.q.xml
index 1edf38a..e69de29 100644
--- a/ql/src/test/results/compiler/plan/udf4.q.xml
+++ b/ql/src/test/results/compiler/plan/udf4.q.xml
@@ -1,1507 +0,0 @@
[file emptied: 1507 lines of serialized query-plan XML deleted — TS_0 → SEL_1 → FS_2 plan over default.dest1 evaluating constant round/floor/ceil/sqrt/rand, arithmetic, and bitwise-not expressions into columns _col0 through _col18; all paths and timestamps masked]
diff --git a/ql/src/test/results/compiler/plan/udf6.q.xml b/ql/src/test/results/compiler/plan/udf6.q.xml
index c4c8980..e69de29 100644
--- a/ql/src/test/results/compiler/plan/udf6.q.xml
+++ b/ql/src/test/results/compiler/plan/udf6.q.xml
@@ -1,698 +0,0 @@
[file emptied: 698 lines of serialized query-plan XML deleted — TS_0 → SEL_1 → FS_2 plan over default.src evaluating concat('a', 'b') and a constant numeric expression into _col0,_col1; all paths and timestamps masked]
diff --git a/ql/src/test/results/compiler/plan/udf_case.q.xml b/ql/src/test/results/compiler/plan/udf_case.q.xml
index 3518469..e69de29 100644
--- a/ql/src/test/results/compiler/plan/udf_case.q.xml
+++ b/ql/src/test/results/compiler/plan/udf_case.q.xml
@@ -1,672 +0,0 @@
[file emptied: 672 lines of serialized query-plan XML deleted — TS_0 → SEL_1 → LIM_2 → FS_3 plan over default.src evaluating CASE (1) WHEN (1) THEN (2) WHEN (3) THEN (4) ELSE (5) END; all paths and timestamps masked]
diff --git a/ql/src/test/results/compiler/plan/udf_when.q.xml b/ql/src/test/results/compiler/plan/udf_when.q.xml
index 4a1d604..e69de29 100644
--- a/ql/src/test/results/compiler/plan/udf_when.q.xml
+++ b/ql/src/test/results/compiler/plan/udf_when.q.xml
@@ -1,672 +0,0 @@
[file emptied: 672 lines of serialized query-plan XML deleted — TS_0 → SEL_1 → LIM_2 → FS_3 plan over default.src evaluating CASE WHEN ((1 = 1)) THEN (2) WHEN ((3 = 5)) THEN (4) ELSE (5) END; all paths and timestamps masked]
- - #### A masked pattern was here #### - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - diff --git a/ql/src/test/results/compiler/plan/union.q.xml b/ql/src/test/results/compiler/plan/union.q.xml index e09bee6..e69de29 100644 --- a/ql/src/test/results/compiler/plan/union.q.xml +++ b/ql/src/test/results/compiler/plan/union.q.xml @@ -1,1619 +0,0 @@ - -#### A masked pattern was here #### - - - - - - - Stage-6 - - - - - - - - - - - Stage-0 - - - - - - - - - - - - - - - - - Stage-2 - - - - - - - #### A masked pattern was here #### - - - - - - - - - #### A masked pattern was here #### - - - - - NONE - - - - 1 - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - string:string - - - - - - - 1 - - - - - FS_12 - - - - - - - - - - - - - _col0 - - - - - - - - string - - - - - string - - - - - - - _col1 - - - - - - - - - string - - - - - - - - - - - - - - - 0 - - - 1 - - - - - - - _col0 - - - _col1 - - - - - - - TS_11 - - - - - - - - - - - - org.apache.hadoop.hive.ql.io.CombineHiveInputFormat - - - true - - - - #### A masked pattern was here #### - - - #### A masked pattern was here #### - - - - - - - #### A masked pattern was here #### - - - -ext-10002 - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - columns - _col0,_col1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - serialization.format - 1 - - - columns.types - string:string - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - - - - - - - - - - - - - - - - - - Stage-5 - - - - - - - - - - - - - - Stage-4 - - - - - - - - - - - - - - true - - - - - - - - - - - - - - - string:string - - - _col0,_col1 - - - true - - - - - - - - - - - Stage-3 - - - - - - - - - - - - - - - - - - - - - - - - - - - - #### A masked pattern was here #### - - - - - - - - - - - - - - - - - - - - - - - - - - Stage-1 - - - - - true - - - - - null-subquery1:unioninput-subquery1:src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - 
org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - null-subquery2:unioninput-subquery2:src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - columns.comments - defaultdefault - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - - - - - - - - - - null-subquery1:unioninput-subquery1:src - - - - - - - - - - - - - - - - - - - - 1 - - - - #### A masked pattern was here #### - - - - - - - 1 - - - #### A masked pattern was here #### - - - true - - - - - - 1 - - - - - FS_8 - - - - - - - - - - - - - - - - _col1 - - - _col1 - - - src - - - - - - - - _col0 - - - _col0 - - - src - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - true - - - - - SEL_7 - - - - - - - - - - - - - key - - - _col0 - - - src - - - - - - string - - - - - - - value - - - _col1 - - - src - - - - - - string - - - - - - - - - - - - - - UNION_6 - - - - - - - - - - - - - - - - _col1 - - - value - - - src - - - - - - - - _col0 - - - key - - - src - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - true - - - - - SEL_5 - - - - - - - - - - - - - - - - - - - key - - - src - - - - - - - - - - - - int - - - - - 100 - - - - - - - - - - - - boolean - - - - - - - - - FIL_10 - - - - - - - - - - - - - src - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_3 - - - - - key - - - value - - - - - - - - - - key - - - src - - - - - - string - - - - - - - value - - - src - - - - - - string - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src - - - - - bigint - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src - - - - - - string - - - - - - - true - - - ROW__ID - - - src - - - - - - - transactionid - - - bucketid - - - rowid - - - - - - - - - - - - - - - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - src - - - - - - string - - - - - - - _col1 - - - src - - - - - - string - - - - - - - - - - - - - - - - _col0 - - - src - - - - - - string - - - - - - - _col1 - - - src - - - - - - string - - 
- - - - - - - - - - - _col1 - - - value - - - src - - - - - - - - _col0 - - - key - - - src - - - - - - - - - - - - - - - - - - - - - - - - _col0 - - - _col1 - - - - - - - SEL_2 - - - - - - - - - - - - - _col0 - - - src - - - - - - string - - - - - - - _col1 - - - src - - - - - - string - - - - - - - - - - - - - - - - - - - key - - - src - - - - - - - - - - - - - 100 - - - - - - - - - - - - - - - - - FIL_9 - - - - - - - - - - - - - key - - - src - - - - - - string - - - - - - - value - - - src - - - - - - string - - - - - - - - - - - - - src - - - - - 0 - - - 1 - - - - - - - key - - - value - - - - - - - - - - TS_0 - - - - - key - - - value - - - - - - - - - - - - - - - - true - - - BLOCK__OFFSET__INSIDE__FILE - - - src - - - - - - bigint - - - - - - - true - - - INPUT__FILE__NAME - - - src - - - - - - string - - - - - - - true - - - ROW__ID - - - src - - - - - - struct<transactionid:bigint,bucketid:int,rowid:bigint> - - - - - - - - - - null-subquery2:unioninput-subquery2:src - - - - - - #### A masked pattern was here #### - - - null-subquery1:unioninput-subquery1:src - - - null-subquery2:unioninput-subquery2:src - - - - - - - #### A masked pattern was here #### - - - src - - - org.apache.hadoop.mapred.TextInputFormat - - - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - - - - - - name - default.src - - - numFiles - 1 - - - columns.types - string:string - - - serialization.ddl - struct src { string key, string value} - - - serialization.format - 1 - - - columns - key,value - - - rawDataSize - 5312 - - - columns.comments - defaultdefault - - - numRows - 500 - - - bucket_count - -1 - - - serialization.lib - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - - COLUMN_STATS_ACCURATE - true - - - file.inputformat - org.apache.hadoop.mapred.TextInputFormat - - - totalSize - 5812 - - - file.outputformat - org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - - - location - #### A masked pattern was here #### - - - transient_lastDdlTime - #### A masked pattern was here #### - - - - - - - - - - - - - - diff --git a/ql/src/test/templates/TestParse.vm b/ql/src/test/templates/TestParse.vm index 8c7d747..e69de29 100644 --- a/ql/src/test/templates/TestParse.vm +++ b/ql/src/test/templates/TestParse.vm @@ -1,151 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.ql.parse; - -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -import java.io.*; -import java.util.*; - -import org.apache.hadoop.hive.ql.QTestUtil; -import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType; -import org.apache.hadoop.hive.ql.exec.Task; - -public class $className extends TestCase { - - private static final String HIVE_ROOT = QTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root")); - private static QTestUtil qt; - - static { - MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode"); - String initScript = "$initScript"; - String cleanupScript = "$cleanupScript"; - - try { - String hadoopVer = "$hadoopVersion"; - qt = new QTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, hadoopVer, - initScript, cleanupScript); - qt.init(null); - } catch (Exception e) { - System.err.println("Exception: " + e.getMessage()); - e.printStackTrace(); - System.err.flush(); - fail("Unexpected exception in static initialization"); - } - } - - - public $className(String name) { - super(name); - } - - @Override - protected void tearDown() { - try { - qt.clearPostTestEffects(); - if (getName().equals("testParse_shutdown")) - qt.shutdown(); - } - catch (Exception e) { - System.err.println("Exception: " + e.getMessage()); - e.printStackTrace(); - System.err.flush(); - fail("Unexpected exception in tearDown"); - } - } - - public static Test suite() { - Set qFilesToExecute = new HashSet(); - String qFiles = System.getProperty("qfile", "").trim(); - if(!qFiles.isEmpty()) { - for(String qFile : qFiles.split(",")) { - qFile = qFile.trim(); - if(!qFile.isEmpty()) { - qFilesToExecute.add(qFile); - } - } - } - TestSuite suite = new TestSuite(); - -#foreach ($qf in $qfiles) - #set ($fname = $qf.getName()) - #set ($eidx = $fname.indexOf('.')) - #set ($tname = $fname.substring(0, $eidx)) - if(qFilesToExecute.isEmpty() || qFilesToExecute.contains("$fname")) { - suite.addTest(new $className("testParse_$tname")); - } -#end - suite.addTest(new $className("testParse_shutdown")); - return suite; - } - - /** - * Dummy last test. 
This is only meant to shutdown qt - */ - public void testParse_shutdown() { - System.err.println ("Cleaning up " + "$className"); - } - - static String debugHint = "\nSee ./ql/target/tmp/log/hive.log or ./itests/qtest/target/tmp/log/hive.log, " - + "or check ./ql/target/surefire-reports or ./itests/qtest/target/surefire-reports/ for specific test cases logs."; - -#foreach ($qf in $qfiles) - #set ($fname = $qf.getName()) - #set ($eidx = $fname.indexOf('.')) - #set ($tname = $fname.substring(0, $eidx)) - #set ($fpath = $qfilesMap.get($fname)) - public void testParse_$tname() throws Exception { - runTest("$tname", "$fname", HIVE_ROOT + "$fpath"); - } - -#end - - private void runTest(String tname, String fname, String fpath) throws Exception { - long startTime = System.currentTimeMillis(); - try { - System.err.println("Begin query: " + fname); - - qt.addFile(fpath); - org.apache.hadoop.hive.ql.exec.Operator.resetId(); - org.apache.hadoop.hive.ql.exec.TaskFactory.resetId(); - - ASTNode tree = qt.parseQuery(fname); - int ecode = qt.checkParseResults(fname, tree); - if (ecode != 0) { - fail("Parse has unexpected out with error code = " + ecode + debugHint); - } - List> tasks = qt.analyzeAST(tree); - ecode = qt.checkPlan(fname, tasks); - if (ecode != 0) { - fail("Semantic Analysis has unexpected output with error code = " + ecode - + debugHint); - } - System.err.println("Done query: " + fname); - qt.getQMap().clear(); - } - catch (Throwable e) { - qt.failed(e, fname, debugHint); - } - - long elapsedTime = System.currentTimeMillis() - startTime; - System.err.println("Done query: " + fname + " elapsedTime=" + elapsedTime/1000 + "s"); - assertTrue("Test passed", true); - } -} diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroObjectInspectorException.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroObjectInspectorException.java index fe4b2eb..1953c04 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroObjectInspectorException.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroObjectInspectorException.java @@ -1,3 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package org.apache.hadoop.hive.serde2.avro; /** diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java index 73ae2d9..2b7fba6 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java @@ -779,7 +779,7 @@ static void serialize(ByteStream.Output buffer, Object o, ObjectInspector oi, // get the scale factor to turn big decimal into a decimal < 1 int factor = dec.precision() - dec.scale(); - factor = sign != -1 ? factor : -factor; + factor = sign == 1 ? factor : -factor; // convert the absolute big decimal to string dec.scaleByPowerOfTen(Math.abs(dec.scale())); diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java index d307b0f..e844979 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/ObjectInspectorUtils.java @@ -951,9 +951,9 @@ public static boolean compareTypes(ObjectInspector o1, ObjectInspector o2) { if (childFieldsList1 == null && childFieldsList2 == null) { return true; - } - - if (childFieldsList1.size() != childFieldsList2.size()) { + } else if (childFieldsList1 == null || childFieldsList2 == null) { + return false; + } else if (childFieldsList1.size() != childFieldsList2.size()) { return false; } diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestDateWritable.java b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestDateWritable.java index 8b5bfbd..fd95ccf 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/io/TestDateWritable.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/io/TestDateWritable.java @@ -1,3 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package org.apache.hadoop.hive.serde2.io; import com.google.code.tempusfugit.concurrency.annotations.*; diff --git a/service/src/java/org/apache/hadoop/hive/service/HiveClient.java b/service/src/java/org/apache/hadoop/hive/service/HiveClient.java index 3f62acc..e69de29 100644 --- a/service/src/java/org/apache/hadoop/hive/service/HiveClient.java +++ b/service/src/java/org/apache/hadoop/hive/service/HiveClient.java @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.service; - -import org.apache.thrift.protocol.TProtocol; - -/** - * Thrift Hive Client Just an empty class that can be used to run queries on a - * stand alone hive server. - */ -public class HiveClient extends ThriftHive.Client implements HiveInterface { - public HiveClient(TProtocol prot) { - super(prot, prot); - } -} diff --git a/service/src/java/org/apache/hadoop/hive/service/HiveInterface.java b/service/src/java/org/apache/hadoop/hive/service/HiveInterface.java index 080e96e..e69de29 100644 --- a/service/src/java/org/apache/hadoop/hive/service/HiveInterface.java +++ b/service/src/java/org/apache/hadoop/hive/service/HiveInterface.java @@ -1,36 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.service; - -/** - * HiveInterface extends 2 interfaces, ThriftHive and ThriftHiveMetastore. - * - * ThriftHive.Iface is defined in: - * service/src/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java - * ThriftHiveMetastore.Iface is defined in: - * metastore/src/gen-javabean/org/apache - * /hadoop/hive/metastore/api/ThriftHiveMetastore.java - * - * These interfaces are generated by Thrift. The thrift files are in: - * ThriftHive: service/if/hive_service.thrift ThriftHiveMetastore: - * metastore/if/hive_metastore.thrift - */ -public interface HiveInterface extends ThriftHive.Iface, - org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface { -} diff --git a/service/src/java/org/apache/hadoop/hive/service/HiveServer.java b/service/src/java/org/apache/hadoop/hive/service/HiveServer.java index 27c7722..e69de29 100644 --- a/service/src/java/org/apache/hadoop/hive/service/HiveServer.java +++ b/service/src/java/org/apache/hadoop/hive/service/HiveServer.java @@ -1,711 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.service; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.FileReader; -import java.io.IOException; -import java.io.PrintStream; -import java.io.UnsupportedEncodingException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.cli.OptionBuilder; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.common.ServerUtils; -import org.apache.hadoop.hive.common.LogUtils; -import org.apache.hadoop.hive.common.LogUtils.LogInitializationException; -import org.apache.hadoop.hive.common.cli.CommonCliOptions; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStore; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Schema; -import org.apache.hadoop.hive.metastore.TServerSocketKeepAlive; -import org.apache.hadoop.hive.ql.CommandNeedRetryException; -import org.apache.hadoop.hive.ql.Driver; -import org.apache.hadoop.hive.ql.plan.api.QueryPlan; -import org.apache.hadoop.hive.ql.processors.CommandProcessor; -import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory; -import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.shims.ShimLoader; -import org.apache.hadoop.mapred.ClusterStatus; -import org.apache.thrift.TException; -import org.apache.thrift.TProcessor; -import org.apache.thrift.TProcessorFactory; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.server.TServer; -import org.apache.thrift.server.TThreadPoolServer; -import org.apache.thrift.transport.TServerSocket; -import org.apache.thrift.transport.TServerTransport; -import org.apache.thrift.transport.TTransport; -import org.apache.thrift.transport.TTransportFactory; -import com.facebook.fb303.fb_status; - -/** - * Thrift Hive Server Implementation. - */ -public class HiveServer extends ThriftHive { - private static final String VERSION = "1"; - - /** - * default port on which to start the Hive server - */ - private static final int DEFAULT_HIVE_SERVER_PORT = 10000; - - /** - * default minimum number of threads serving the Hive server - */ - private static final int DEFAULT_MIN_WORKER_THREADS = 100; - - /** - * default maximum number of threads serving the Hive server - */ - private static final int DEFAULT_MAX_WORKER_THREADS = Integer.MAX_VALUE; - - /** - * Handler which implements the Hive Interface This class can be used in lieu - * of the HiveClient class to get an embedded server. - */ - public static class HiveServerHandler extends HiveMetaStore.HMSHandler - implements HiveInterface { - /** - * Hive server uses org.apache.hadoop.hive.ql.Driver for run() and - * getResults() methods. - * It is the instance of the last Hive query. 
- */ - private Driver driver; - private CommandProcessorResponse response; - /** - * For processors other than Hive queries (Driver), they output to session.out (a temp file) - * first and the fetchOne/fetchN/fetchAll functions get the output from pipeIn. - */ - private BufferedReader pipeIn; - - /** - * Flag that indicates whether the last executed command was a Hive query. - */ - private boolean isHiveQuery; - - public static final Log LOG = LogFactory.getLog(HiveServer.class.getName()); - - /** - * Construct a new handler. - * - * @throws MetaException unable to create metastore - */ - public HiveServerHandler() throws MetaException { - this(new HiveConf(SessionState.class)); - } - - /** - * Construct a new handler with the specified hive configuration. - * - * @param conf caller specified hive configuration - * @throws MetaException unable to create metastore - */ - public HiveServerHandler(HiveConf conf) throws MetaException { - super(HiveServer.class.getName(), conf); - - isHiveQuery = false; - driver = null; - SessionState session = new SessionState(conf); - SessionState.start(session); - setupSessionIO(session); - } - - private void setupSessionIO(SessionState session) { - try { - LOG.info("Putting temp output to file " + session.getTmpOutputFile().toString()); - session.in = null; // hive server's session input stream is not used - // open a per-session file in auto-flush mode for writing temp results - session.out = new PrintStream(new FileOutputStream(session.getTmpOutputFile()), true, "UTF-8"); - // TODO: for hadoop jobs, progress is printed out to session.err, - // we should find a way to feed back job progress to client - session.err = new PrintStream(System.err, true, "UTF-8"); - } catch (IOException e) { - LOG.error("Error in creating temp output file ", e); - try { - session.in = null; - session.out = new PrintStream(System.out, true, "UTF-8"); - session.err = new PrintStream(System.err, true, "UTF-8"); - } catch (UnsupportedEncodingException ee) { - ee.printStackTrace(); - session.out = null; - session.err = null; - } - } - } - - /** - * Executes a query. - * - * @param cmd - * HiveQL query to execute - */ - public void execute(String cmd) throws HiveServerException, TException { - HiveServerHandler.LOG.info("Running the query: " + cmd); - SessionState session = SessionState.get(); - - String cmd_trimmed = cmd.trim(); - String[] tokens = cmd_trimmed.split("\\s"); - String cmd_1 = cmd_trimmed.substring(tokens[0].length()).trim(); - - int ret = 0; - String errorMessage = ""; - String SQLState = null; - - try { - // Close the existing driver object (CommandProcessor) before creating - // the new driver (CommandProcessor) object to clean-up the resources - if (driver != null) { - driver.close(); - driver = null; - } - CommandProcessor proc = CommandProcessorFactory.get(tokens[0]); - if (proc != null) { - if (proc instanceof Driver) { - isHiveQuery = true; - driver = (Driver) proc; - // In Hive server mode, we are not able to retry in the FetchTask - // case, when calling fetch quueries since execute() has returned. - // For now, we disable the test attempts. 
- driver.setTryCount(Integer.MAX_VALUE); - response = driver.run(cmd); - } else { - isHiveQuery = false; - driver = null; - // need to reset output for each non-Hive query - setupSessionIO(session); - response = proc.run(cmd_1); - } - - ret = response.getResponseCode(); - SQLState = response.getSQLState(); - errorMessage = response.getErrorMessage(); - } - } catch (Exception e) { - HiveServerException ex = new HiveServerException(); - ex.setMessage("Error running query: " + e.toString()); - ex.setErrorCode(ret == 0? -10000: ret); - throw ex; - } - - if (ret != 0) { - throw new HiveServerException("Query returned non-zero code: " + ret - + ", cause: " + errorMessage, ret, SQLState); - } - } - - /** - * Should be called by the client at the end of a session. - */ - public void clean() { - if (driver != null) { - driver.close(); - driver.destroy(); - } - - SessionState session = SessionState.get(); - if (session.getTmpOutputFile() != null) { - session.getTmpOutputFile().delete(); - } - pipeIn = null; - } - - /** - * Return the status information about the Map-Reduce cluster. - */ - public HiveClusterStatus getClusterStatus() throws HiveServerException, - TException { - HiveClusterStatus hcs; - try { - Driver drv = new Driver(); - drv.init(); - - ClusterStatus cs = drv.getClusterStatus(); - JobTrackerState state = JobTrackerState.valueOf(ShimLoader.getHadoopShims().getJobTrackerState(cs).name()); - - hcs = new HiveClusterStatus(cs.getTaskTrackers(), cs.getMapTasks(), cs - .getReduceTasks(), cs.getMaxMapTasks(), cs.getMaxReduceTasks(), - state); - } catch (Exception e) { - LOG.error(e.toString()); - e.printStackTrace(); - HiveServerException ex = new HiveServerException(); - ex.setMessage("Unable to get cluster status: " + e.toString()); - throw ex; - } - return hcs; - } - - /** - * Return the Hive schema of the query result. - */ - public Schema getSchema() throws HiveServerException, TException { - if (!isHiveQuery) { - Schema schema = response.getSchema(); - if (schema == null) { - // Return empty schema if the last command was not a Hive query - return new Schema(); - } - else { - return schema; - } - } - - assert driver != null: "getSchema() is called on a Hive query and driver is NULL."; - - try { - Schema schema = driver.getSchema(); - if (schema == null) { - schema = new Schema(); - } - LOG.info("Returning schema: " + schema); - return schema; - } catch (Exception e) { - LOG.error(e.toString()); - e.printStackTrace(); - HiveServerException ex = new HiveServerException(); - ex.setMessage("Unable to get schema: " + e.toString()); - throw ex; - } - } - - /** - * Return the Thrift schema of the query result. - */ - public Schema getThriftSchema() throws HiveServerException, TException { - if (!isHiveQuery) { - // Return empty schema if the last command was not a Hive query - return new Schema(); - } - - assert driver != null: "getThriftSchema() is called on a Hive query and driver is NULL."; - - try { - Schema schema = driver.getThriftSchema(); - if (schema == null) { - schema = new Schema(); - } - LOG.info("Returning schema: " + schema); - return schema; - } catch (Exception e) { - LOG.error(e.toString()); - e.printStackTrace(); - HiveServerException ex = new HiveServerException(); - ex.setMessage("Unable to get schema: " + e.toString()); - throw ex; - } - } - - - /** - * Fetches the next row in a query result set. - * - * @return the next row in a query result set. null if there is no more row - * to fetch. 
- */ - public String fetchOne() throws HiveServerException, TException { - if (!isHiveQuery) { - // Return no results if the last command was not a Hive query - List results = new ArrayList(1); - readResults(results, 1); - if (results.size() > 0) { - return results.get(0); - } else { // throw an EOF exception - throw new HiveServerException("OK", 0, ""); - } - } - - assert driver != null: "fetchOne() is called on a Hive query and driver is NULL."; - - ArrayList result = new ArrayList(); - driver.setMaxRows(1); - try { - if (driver.getResults(result)) { - return result.get(0); - } - // TODO: Cannot return null here because thrift cannot handle nulls - // TODO: Returning empty string for now. Need to figure out how to - // TODO: return null in some other way - throw new HiveServerException("OK", 0, ""); - // return ""; - } catch (CommandNeedRetryException e) { - HiveServerException ex = new HiveServerException(); - ex.setMessage(e.getMessage()); - throw ex; - } catch (IOException e) { - HiveServerException ex = new HiveServerException(); - ex.setMessage(e.getMessage()); - throw ex; - } - } - - private void cleanTmpFile() { - if (pipeIn != null) { - SessionState session = SessionState.get(); - File tmp = session.getTmpOutputFile(); - tmp.delete(); - pipeIn = null; - } - } - - /** - * Reads the temporary results for non-Hive (non-Driver) commands to the - * resulting List of strings. - * @param results list of strings containing the results - * @param nLines number of lines read at once. If it is <= 0, then read all lines. - */ - private void readResults(List results, int nLines) { - - if (pipeIn == null) { - SessionState session = SessionState.get(); - File tmp = session.getTmpOutputFile(); - try { - pipeIn = new BufferedReader(new FileReader(tmp)); - } catch (FileNotFoundException e) { - LOG.error("File " + tmp + " not found. ", e); - return; - } - } - - boolean readAll = false; - - for (int i = 0; i < nLines || nLines <= 0; ++i) { - try { - String line = pipeIn.readLine(); - if (line == null) { - // reached the end of the result file - readAll = true; - break; - } else { - results.add(line); - } - } catch (IOException e) { - LOG.error("Reading temp results encountered an exception: ", e); - readAll = true; - } - } - if (readAll) { - cleanTmpFile(); - } - } - - /** - * Fetches numRows rows. - * - * @param numRows - * Number of rows to fetch. - * @return A list of rows. The size of the list is numRows if there are at - * least numRows rows available to return. The size is smaller than - * numRows if there aren't enough rows. The list will be empty if - * there is no more row to fetch or numRows == 0. 
- * @throws HiveServerException - * Invalid value for numRows (numRows < 0) - */ - public List fetchN(int numRows) throws HiveServerException, - TException { - if (numRows < 0) { - HiveServerException ex = new HiveServerException(); - ex.setMessage("Invalid argument for number of rows: " + numRows); - throw ex; - } - - ArrayList result = new ArrayList(); - - if (!isHiveQuery) { - readResults(result, numRows); - return result; - } - - assert driver != null: "fetchN() is called on a Hive query and driver is NULL."; - - driver.setMaxRows(numRows); - try { - driver.getResults(result); - } catch (CommandNeedRetryException e) { - HiveServerException ex = new HiveServerException(); - ex.setMessage(e.getMessage()); - throw ex; - } catch (IOException e) { - HiveServerException ex = new HiveServerException(); - ex.setMessage(e.getMessage()); - throw ex; - } - return result; - } - - /** - * Fetches all the rows in a result set. - * - * @return All the rows in a result set of a query executed using execute - * method. - * - * TODO: Currently the server buffers all the rows before returning - * them to the client. Decide whether the buffering should be done - * in the client. - */ - public List fetchAll() throws HiveServerException, TException { - - ArrayList rows = new ArrayList(); - ArrayList result = new ArrayList(); - - if (!isHiveQuery) { - // Return all results if numRows <= 0 - readResults(result, 0); - return result; - } - - try { - while (driver.getResults(result)) { - rows.addAll(result); - result.clear(); - } - } catch (CommandNeedRetryException e) { - HiveServerException ex = new HiveServerException(); - ex.setMessage(e.getMessage()); - throw ex; - } catch (IOException e) { - HiveServerException ex = new HiveServerException(); - ex.setMessage(e.getMessage()); - throw ex; - } - return rows; - } - - /** - * Return the status of the server. - */ - @Override - public fb_status getStatus() { - return fb_status.ALIVE; - } - - /** - * Return the version of the server software. - */ - @Override - public String getVersion() { - return VERSION; - } - - @Override - public QueryPlan getQueryPlan() throws HiveServerException, TException { - QueryPlan qp = new QueryPlan(); - - if (!isHiveQuery) { - return qp; - } - - assert driver != null: "getQueryPlan() is called on a Hive query and driver is NULL."; - - // TODO for now only return one query at a time - // going forward, all queries associated with a single statement - // will be returned in a single QueryPlan - try { - qp.addToQueries(driver.getQueryPlan()); - } catch (Exception e) { - HiveServerException ex = new HiveServerException(); - ex.setMessage(e.toString()); - throw ex; - } - return qp; - } - - } - - /** - * ThriftHiveProcessorFactory. 
- * - */ - public static class ThriftHiveProcessorFactory extends TProcessorFactory { - private final HiveConf conf; - - public ThriftHiveProcessorFactory(TProcessor processor, HiveConf conf) { - super(processor); - this.conf = conf; - } - - @Override - @SuppressWarnings("unchecked") - public TProcessor getProcessor(TTransport trans) { - try { - Iface handler = new HiveServerHandler(new HiveConf(conf)); - return new ThriftHive.Processor(handler); - } catch (Exception e) { - HiveServerHandler.LOG.warn("Failed to get processor by exception " + e, e); - trans.close(); - throw new RuntimeException(e); - } - } - } - - /** - * HiveServer specific CLI - * - */ - static public class HiveServerCli extends CommonCliOptions { - private static final String OPTION_MAX_WORKER_THREADS = "maxWorkerThreads"; - private static final String OPTION_MIN_WORKER_THREADS = "minWorkerThreads"; - - public int port = DEFAULT_HIVE_SERVER_PORT; - public int minWorkerThreads = DEFAULT_MIN_WORKER_THREADS; - public int maxWorkerThreads = DEFAULT_MAX_WORKER_THREADS; - - @SuppressWarnings("static-access") - public HiveServerCli() { - super("hiveserver", true); - - // -p port - OPTIONS.addOption(OptionBuilder - .hasArg() - .withArgName("port") - .withDescription("Hive Server port number, default:" - + DEFAULT_HIVE_SERVER_PORT) - .create('p')); - - // min worker thread count - OPTIONS.addOption(OptionBuilder - .hasArg() - .withLongOpt(OPTION_MIN_WORKER_THREADS) - .withDescription("minimum number of worker threads, default:" - + DEFAULT_MIN_WORKER_THREADS) - .create()); - - // max worker thread count - OPTIONS.addOption(OptionBuilder - .hasArg() - .withLongOpt(OPTION_MAX_WORKER_THREADS) - .withDescription("maximum number of worker threads, default:" - + DEFAULT_MAX_WORKER_THREADS) - .create()); - } - - @Override - public void parse(String[] args) { - super.parse(args); - - // support the old syntax "hiveserver [port [threads]]" but complain - args = commandLine.getArgs(); - if (args.length >= 1) { - // complain about the deprecated syntax -- but still run - System.err.println( - "This usage has been deprecated, consider using the new command " - + "line syntax (run with -h to see usage information)"); - - port = Integer.parseInt(args[0]); - } - if (args.length >= 2) { - minWorkerThreads = Integer.parseInt(args[1]); - } - - // notice that command line options take precedence over the - // deprecated (old style) naked args... 
- if (commandLine.hasOption('p')) { - port = Integer.parseInt(commandLine.getOptionValue('p')); - } else { - // legacy handling - String hivePort = System.getenv("HIVE_PORT"); - if (hivePort != null) { - port = Integer.parseInt(hivePort); - } - } - if (commandLine.hasOption(OPTION_MIN_WORKER_THREADS)) { - minWorkerThreads = Integer.parseInt( - commandLine.getOptionValue(OPTION_MIN_WORKER_THREADS)); - } - if (commandLine.hasOption(OPTION_MAX_WORKER_THREADS)) { - maxWorkerThreads = Integer.parseInt( - commandLine.getOptionValue(OPTION_MAX_WORKER_THREADS)); - } - } - } - - public static void main(String[] args) { - try { - HiveServerCli cli = new HiveServerCli(); - - cli.parse(args); - - - // NOTE: It is critical to do this prior to initializing log4j, otherwise - // any log specific settings via hiveconf will be ignored - Properties hiveconf = cli.addHiveconfToSystemProperties(); - - // NOTE: It is critical to do this here so that log4j is reinitialized - // before any of the other core hive classes are loaded - try { - LogUtils.initHiveLog4j(); - } catch (LogInitializationException e) { - HiveServerHandler.LOG.warn(e.getMessage()); - } - - HiveConf conf = new HiveConf(HiveServerHandler.class); - ServerUtils.cleanUpScratchDir(conf); - - - boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.SERVER_TCP_KEEP_ALIVE); - int timeout = (int) HiveConf.getTimeVar( - conf, HiveConf.ConfVars.SERVER_READ_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS); - - TServerTransport serverTransport = - tcpKeepAlive ? new TServerSocketKeepAlive(cli.port) : new TServerSocket(cli.port, timeout); - - // set all properties specified on the command line - for (Map.Entry item : hiveconf.entrySet()) { - conf.set((String) item.getKey(), (String) item.getValue()); - } - - ThriftHiveProcessorFactory hfactory = - new ThriftHiveProcessorFactory(null, conf); - - TThreadPoolServer.Args sargs = new TThreadPoolServer.Args(serverTransport) - .processorFactory(hfactory) - .transportFactory(new TTransportFactory()) - .protocolFactory(new TBinaryProtocol.Factory()) - .minWorkerThreads(cli.minWorkerThreads) - .maxWorkerThreads(cli.maxWorkerThreads); - - TServer server = new TThreadPoolServer(sargs); - - String msg = "Starting hive server on port " + cli.port - + " with " + cli.minWorkerThreads + " min worker threads and " - + cli.maxWorkerThreads + " max worker threads"; - HiveServerHandler.LOG.info(msg); - - HiveServerHandler.LOG.info("TCP keepalive = " + tcpKeepAlive); - - if (cli.isVerbose()) { - System.err.println(msg); - } - - server.serve(); - } catch (Exception x) { - x.printStackTrace(); - } - } -} diff --git a/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java b/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java index 6c645b8..23ba79c 100644 --- a/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java +++ b/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java @@ -218,6 +218,7 @@ public static TServerSocket getServerSocket(String hiveHost, int portNum) throws TTransportException { InetSocketAddress serverAddress; if (hiveHost == null || hiveHost.isEmpty()) { + // Wildcard bind serverAddress = new InetSocketAddress(portNum); } else { serverAddress = new InetSocketAddress(hiveHost, portNum); @@ -226,25 +227,26 @@ public static TServerSocket getServerSocket(String hiveHost, int portNum) } public static TServerSocket getServerSSLSocket(String hiveHost, int portNum, String keyStorePath, - String keyStorePassWord, List sslVersionBlacklist) - throws TTransportException, 
UnknownHostException { + String keyStorePassWord, List sslVersionBlacklist) throws TTransportException, + UnknownHostException { TSSLTransportFactory.TSSLTransportParameters params = - new TSSLTransportFactory.TSSLTransportParameters(); + new TSSLTransportFactory.TSSLTransportParameters(); params.setKeyStore(keyStorePath, keyStorePassWord); - - InetAddress serverAddress; + InetSocketAddress serverAddress; if (hiveHost == null || hiveHost.isEmpty()) { - serverAddress = InetAddress.getLocalHost(); + // Wildcard bind + serverAddress = new InetSocketAddress(portNum); } else { - serverAddress = InetAddress.getByName(hiveHost); + serverAddress = new InetSocketAddress(hiveHost, portNum); } - TServerSocket thriftServerSocket = TSSLTransportFactory.getServerSocket(portNum, 0, serverAddress, params); + TServerSocket thriftServerSocket = + TSSLTransportFactory.getServerSocket(portNum, 0, serverAddress.getAddress(), params); if (thriftServerSocket.getServerSocket() instanceof SSLServerSocket) { List sslVersionBlacklistLocal = new ArrayList(); for (String sslVersion : sslVersionBlacklist) { sslVersionBlacklistLocal.add(sslVersion.trim().toLowerCase()); } - SSLServerSocket sslServerSocket = (SSLServerSocket)thriftServerSocket.getServerSocket(); + SSLServerSocket sslServerSocket = (SSLServerSocket) thriftServerSocket.getServerSocket(); List enabledProtocols = new ArrayList(); for (String protocol : sslServerSocket.getEnabledProtocols()) { if (sslVersionBlacklistLocal.contains(protocol.toLowerCase())) { @@ -254,7 +256,8 @@ public static TServerSocket getServerSSLSocket(String hiveHost, int portNum, Str } } sslServerSocket.setEnabledProtocols(enabledProtocols.toArray(new String[0])); - LOG.info("SSL Server Socket Enabled Protocols: " + Arrays.toString(sslServerSocket.getEnabledProtocols())); + LOG.info("SSL Server Socket Enabled Protocols: " + + Arrays.toString(sslServerSocket.getEnabledProtocols())); } return thriftServerSocket; } diff --git a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java index 7e61919..3bf2960 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java +++ b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java @@ -18,13 +18,17 @@ package org.apache.hive.service.cli.operation; import java.io.CharArrayWriter; +import java.util.regex.Pattern; +import org.apache.hadoop.hive.ql.exec.Task; import org.apache.log4j.Layout; import org.apache.log4j.Logger; import org.apache.log4j.WriterAppender; import org.apache.log4j.spi.Filter; import org.apache.log4j.spi.LoggingEvent; +import com.google.common.base.Joiner; + /** * An Appender to divert logs from individual threads to the LogObject they belong to. */ @@ -33,20 +37,29 @@ private final OperationManager operationManager; /** - * A log filter that exclude messages coming from the logger with the given name. - * We apply this filter on the Loggers used by the log diversion stuff, so that + * A log filter that filters messages coming from the logger with the given names. + * It be used as a white list filter or a black list filter. + * We apply black list filter on the Loggers used by the log diversion stuff, so that * they don't generate more logs for themselves when they process logs. 
+ * White list filter is used for less verbose log collection */ - private static class NameExclusionFilter extends Filter { - private String excludeLoggerName = null; + private static class NameFilter extends Filter { + private final Pattern namePattern; + private final boolean excludeMatches; - public NameExclusionFilter(String excludeLoggerName) { - this.excludeLoggerName = excludeLoggerName; + public NameFilter(boolean isExclusionFilter, String [] loggerNames) { + this.excludeMatches = isExclusionFilter; + String matchRegex = Joiner.on("|").join(loggerNames); + this.namePattern = Pattern.compile(matchRegex); } @Override public int decide(LoggingEvent ev) { - if (ev.getLoggerName().equals(excludeLoggerName)) { + boolean isMatch = namePattern.matcher(ev.getLoggerName()).matches(); + if (excludeMatches == isMatch) { + // Deny if this is black-list filter (excludeMatches = true) and it + // matched + // or if this is whitelist filter and it didn't match return Filter.DENY; } return Filter.NEUTRAL; @@ -56,21 +69,29 @@ public int decide(LoggingEvent ev) { /** This is where the log message will go to */ private final CharArrayWriter writer = new CharArrayWriter(); - public LogDivertAppender(Layout layout, OperationManager operationManager) { + public LogDivertAppender(Layout layout, OperationManager operationManager, boolean isVerbose) { setLayout(layout); setWriter(writer); setName("LogDivertAppender"); this.operationManager = operationManager; - // Filter out messages coming from log processing classes, or we'll run an infinite loop. - addFilter(new NameExclusionFilter(LOG.getName())); - addFilter(new NameExclusionFilter(OperationLog.class.getName())); - addFilter(new NameExclusionFilter(OperationManager.class.getName())); + if (isVerbose) { + // Filter out messages coming from log processing classes, or we'll run an + // infinite loop. + String[] exclLoggerNames = { LOG.getName(), OperationLog.class.getName(), + OperationManager.class.getName() }; + addFilter(new NameFilter(true, exclLoggerNames)); + } else { + // in non verbose mode, show only select logger messages + String[] inclLoggerNames = { "org.apache.hadoop.mapreduce.JobSubmitter", + "org.apache.hadoop.mapreduce.Job", "SessionState", Task.class.getName() }; + addFilter(new NameFilter(false, inclLoggerNames)); + } } /** - * Overrides WriterAppender.subAppend(), which does the real logging. - * No need to worry about concurrency since log4j calls this synchronously. + * Overrides WriterAppender.subAppend(), which does the real logging. No need + * to worry about concurrency since log4j calls this synchronously. 
*/ @Override protected void subAppend(LoggingEvent event) { diff --git a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java index a57b6e5..76be713 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java +++ b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java @@ -18,8 +18,8 @@ package org.apache.hive.service.cli.operation; -import java.util.Enumeration; import java.util.ArrayList; +import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -30,16 +30,26 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Schema; import org.apache.hive.service.AbstractService; -import org.apache.hive.service.cli.*; +import org.apache.hive.service.cli.FetchOrientation; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.hive.service.cli.OperationHandle; +import org.apache.hive.service.cli.OperationState; +import org.apache.hive.service.cli.OperationStatus; +import org.apache.hive.service.cli.RowSet; +import org.apache.hive.service.cli.RowSetFactory; +import org.apache.hive.service.cli.TableSchema; import org.apache.hive.service.cli.session.HiveSession; -import org.apache.log4j.*; +import org.apache.log4j.Appender; +import org.apache.log4j.ConsoleAppender; +import org.apache.log4j.Layout; +import org.apache.log4j.Logger; +import org.apache.log4j.PatternLayout; /** * OperationManager. * */ public class OperationManager extends AbstractService { - private static final String DEFAULT_LAYOUT_PATTERN = "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n"; private final Log LOG = LogFactory.getLog(OperationManager.class.getName()); private HiveConf hiveConf; @@ -54,7 +64,8 @@ public OperationManager() { public synchronized void init(HiveConf hiveConf) { this.hiveConf = hiveConf; if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) { - initOperationLogCapture(); + boolean isVerbose = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_VERBOSE); + initOperationLogCapture(isVerbose); } else { LOG.debug("Operation level logging is turned off"); } @@ -73,7 +84,7 @@ public synchronized void stop() { super.stop(); } - private void initOperationLogCapture() { + private void initOperationLogCapture(boolean isVerbose) { // There should be a ConsoleAppender. Copy its Layout. Logger root = Logger.getRootLogger(); Layout layout = null; @@ -87,13 +98,19 @@ private void initOperationLogCapture() { } } - if (layout == null) { - layout = new PatternLayout(DEFAULT_LAYOUT_PATTERN); - LOG.info("Cannot find a Layout from a ConsoleAppender. Using default Layout pattern."); - } + final String VERBOSE_PATTERN = "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n"; + final String NONVERBOSE_PATTERN = "%-5p : %m%n"; + if (isVerbose) { + if (layout == null) { + layout = new PatternLayout(VERBOSE_PATTERN); + LOG.info("Cannot find a Layout from a ConsoleAppender. Using default Layout pattern."); + } + } else { + layout = new PatternLayout(NONVERBOSE_PATTERN); + } // Register another Appender (with the same layout) that talks to us. 
- Appender ap = new LogDivertAppender(layout, this); + Appender ap = new LogDivertAppender(layout, this, isVerbose); root.addAppender(ap); } diff --git a/service/src/java/org/apache/hive/service/cli/thrift/EmbeddedThriftBinaryCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/EmbeddedThriftBinaryCLIService.java index 61700c1..ac63537 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/EmbeddedThriftBinaryCLIService.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/EmbeddedThriftBinaryCLIService.java @@ -33,8 +33,13 @@ public EmbeddedThriftBinaryCLIService() { super(new CLIService(null)); isEmbedded = true; HiveConf.setLoadHiveServer2Config(true); - cliService.init(new HiveConf()); + } + + @Override + public synchronized void init(HiveConf hiveConf) { + cliService.init(hiveConf); cliService.start(); + super.init(hiveConf); } public ICLIService getService() { diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java index cc974db..3a8ae70 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java @@ -55,7 +55,7 @@ protected static HiveAuthFactory hiveAuthFactory; protected int portNum; - protected InetAddress serverAddress; + protected InetAddress serverIPAddress; protected String hiveHost; protected TServer server; protected org.eclipse.jetty.server.Server httpServer; @@ -85,9 +85,9 @@ public synchronized void init(HiveConf hiveConf) { } try { if (hiveHost != null && !hiveHost.isEmpty()) { - serverAddress = InetAddress.getByName(hiveHost); + serverIPAddress = InetAddress.getByName(hiveHost); } else { - serverAddress = InetAddress.getLocalHost(); + serverIPAddress = InetAddress.getLocalHost(); } } catch (UnknownHostException e) { throw new ServiceException(e); @@ -153,8 +153,8 @@ public int getPortNumber() { return portNum; } - public InetAddress getServerAddress() { - return serverAddress; + public InetAddress getServerIPAddress() { + return serverIPAddress; } @Override diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java index e116c61..4503471 100644 --- a/service/src/java/org/apache/hive/service/server/HiveServer2.java +++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java @@ -250,10 +250,10 @@ private void setRegisteredWithZooKeeper(boolean registeredWithZooKeeper) { } private String getServerInstanceURI(HiveConf hiveConf) throws Exception { - if ((thriftCLIService == null) || (thriftCLIService.getServerAddress() == null)) { + if ((thriftCLIService == null) || (thriftCLIService.getServerIPAddress() == null)) { throw new Exception("Unable to get the server address; it hasn't been initialized yet."); } - return thriftCLIService.getServerAddress().getHostAddress() + ":" + return thriftCLIService.getServerIPAddress().getHostName() + ":" + thriftCLIService.getPortNumber(); } diff --git a/service/src/test/org/apache/hadoop/hive/service/TestHiveServerSessions.java b/service/src/test/org/apache/hadoop/hive/service/TestHiveServerSessions.java index fd38907..e69de29 100644 --- a/service/src/test/org/apache/hadoop/hive/service/TestHiveServerSessions.java +++ b/service/src/test/org/apache/hadoop/hive/service/TestHiveServerSessions.java @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.service; - -import junit.framework.TestCase; -import org.apache.thrift.protocol.TBinaryProtocol; -import org.apache.thrift.transport.TSocket; - -import java.io.IOException; -import java.net.ServerSocket; - -/** - * For testing HiveServer in server mode - * - */ -public class TestHiveServerSessions extends TestCase { - - private static final int clientNum = 2; - - private int port; - private Thread server; - - private TSocket[] transports = new TSocket[clientNum]; - private HiveClient[] clients = new HiveClient[clientNum]; - - public TestHiveServerSessions(String name) { - super(name); - } - - @Override - protected void setUp() throws Exception { - super.setUp(); - port = findFreePort(); - server = new Thread(new Runnable() { - public void run() { - HiveServer.main(new String[]{"-p", String.valueOf(port)}); - } - }); - server.start(); - Thread.sleep(5000); - - for (int i = 0; i < transports.length ; i++) { - TSocket transport = new TSocket("localhost", port); - transport.open(); - transports[i] = transport; - clients[i] = new HiveClient(new TBinaryProtocol(transport)); - } - } - - @Override - protected void tearDown() throws Exception { - super.tearDown(); - for (TSocket socket : transports) { - if (socket != null) { - try { - socket.close(); - } catch (Exception e) { - // ignroe - } - } - } - if (server != null) { - server.interrupt(); - } - } - - private int findFreePort() throws IOException { - ServerSocket socket= new ServerSocket(0); - int port = socket.getLocalPort(); - socket.close(); - return port; - } - - public void testSessionVars() throws Exception { - for (int i = 0; i < clients.length; i++) { - clients[i].execute("set hiveconf:var=value" + i); - } - - for (int i = 0; i < clients.length; i++) { - clients[i].execute("set hiveconf:var"); - assertEquals("hiveconf:var=value" + i, clients[i].fetchOne()); - } - } -} diff --git a/service/src/test/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java b/service/src/test/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java index 4e63a30..42bdf21 100644 --- a/service/src/test/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java +++ b/service/src/test/org/apache/hive/service/cli/operation/TestOperationLoggingAPI.java @@ -17,29 +17,38 @@ */ package org.apache.hive.service.cli.operation; -import org.junit.Assert; +import java.io.File; + import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hive.service.cli.*; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hive.service.cli.FetchOrientation; +import org.apache.hive.service.cli.FetchType; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.hive.service.cli.OperationHandle; +import org.apache.hive.service.cli.OperationState; +import 
+import org.apache.hive.service.cli.RowSet;
+import org.apache.hive.service.cli.SessionHandle;
 import org.apache.hive.service.cli.thrift.EmbeddedThriftBinaryCLIService;
 import org.apache.hive.service.cli.thrift.ThriftCLIServiceClient;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.File;
-
 /**
  * TestOperationLoggingAPI
  * Test the FetchResults of TFetchType.LOG in thrift level.
  */
 public class TestOperationLoggingAPI {
-  private HiveConf hiveConf = new HiveConf();
-  private String tableName = "testOperationLoggingAPI_table";
+  private static HiveConf hiveConf;
+  private final String tableName = "testOperationLoggingAPI_table";
   private File dataFile;
   private ThriftCLIServiceClient client;
   private SessionHandle sessionHandle;
-  private String sql = "select * from " + tableName;
-  private String[] expectedLogs = {
+  private final String sql = "select * from " + tableName;
+  private final String[] expectedLogs = {
     "Parsing command",
     "Parse Completed",
     "Starting Semantic Analysis",
@@ -47,6 +56,12 @@
     "Starting command"
   };
+  @BeforeClass
+  public static void setUpBeforeClass() {
+    hiveConf = new HiveConf();
+    hiveConf.setBoolean(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_VERBOSE.varname, true);
+  }
+
   /**
   * Start embedded mode, open a session, and create a table for cases usage
   * @throws Exception
   */
@@ -247,7 +262,7 @@ private void verifyFetchedLog(RowSet rowSet) {
 
   private void verifyFetchedLog(String logs) {
     for (String log : expectedLogs) {
-      Assert.assertTrue(logs.contains(log));
+      Assert.assertTrue("Checking for presence of " + log, logs.contains(log));
     }
   }
 }
diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index cf0cf43..b17ff69 100644
--- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -552,7 +552,13 @@ public HdfsFileStatus getFullFileStatus(Configuration conf, FileSystem fs,
     FileStatus fileStatus = fs.getFileStatus(file);
     AclStatus aclStatus = null;
     if (isExtendedAclEnabled(conf)) {
-      aclStatus = fs.getAclStatus(file);
+      //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless.
+      try {
+        aclStatus = fs.getAclStatus(file);
+      } catch (Exception e) {
+        LOG.info("Skipping ACL inheritance: File system for path " + file + " " +
+            "does not support ACLs but dfs.namenode.acls.enabled is set to true: " + e, e);
+      }
     }
     return new Hadoop23FileStatus(fileStatus, aclStatus);
   }
@@ -568,19 +574,25 @@ public void setFullFileStatus(Configuration conf, HdfsFileStatus sourceStatus,
     run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()});
 
     if (isExtendedAclEnabled(conf)) {
-      AclStatus aclStatus = ((Hadoop23FileStatus) sourceStatus).getAclStatus();
-      List<AclEntry> aclEntries = aclStatus.getEntries();
-      removeBaseAclEntries(aclEntries);
-
-      //the ACL api's also expect the tradition user/group/other permission in the form of ACL
-      FsPermission sourcePerm = sourceStatus.getFileStatus().getPermission();
-      aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
-      aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction()));
-      aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction()));
-
-      //construct the -setfacl command
-      String aclEntry = Joiner.on(",").join(aclStatus.getEntries());
-      run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()});
+      //Attempt extended Acl operations only if its enabled, but don't fail the operation regardless.
+      try {
+        AclStatus aclStatus = ((Hadoop23FileStatus) sourceStatus).getAclStatus();
+        List<AclEntry> aclEntries = aclStatus.getEntries();
+        removeBaseAclEntries(aclEntries);
+
+        //the ACL api's also expect the tradition user/group/other permission in the form of ACL
+        FsPermission sourcePerm = sourceStatus.getFileStatus().getPermission();
+        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
+        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction()));
+        aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction()));
+
+        //construct the -setfacl command
+        String aclEntry = Joiner.on(",").join(aclStatus.getEntries());
+        run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()});
+      } catch (Exception e) {
+        LOG.info("Skipping ACL inheritance: File system for path " + target + " " +
+            "does not support ACLs but dfs.namenode.acls.enabled is set to true: " + e, e);
+      }
     } else {
       String permission = Integer.toString(sourceStatus.getFileStatus().getPermission().toShort(), 8);
       run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()});
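
Both Hadoop23Shims hunks above apply the same defensive pattern: attempt the extended-ACL call only when isExtendedAclEnabled(conf) reports that ACLs are configured, and if the underlying file system still rejects the call, log the failure and fall back to plain permission bits instead of failing the whole operation. The standalone sketch below illustrates that pattern against the public Hadoop FileSystem API; it is only a sketch, and the class and method names (AclFallbackExample, getAclStatusOrNull) are illustrative rather than part of the patch.

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;

// Illustrative sketch only -- not part of the patch above.
public class AclFallbackExample {
  private static final Log LOG = LogFactory.getLog(AclFallbackExample.class);

  // Returns the ACL status of 'file', or null when the file system rejects the
  // extended-ACL call (for example, it does not support ACLs even though
  // dfs.namenode.acls.enabled is set to true). Callers treat null as
  // "fall back to plain chmod-style permissions".
  public static AclStatus getAclStatusOrNull(Configuration conf, Path file) {
    try {
      FileSystem fs = file.getFileSystem(conf);
      return fs.getAclStatus(file);
    } catch (Exception e) {
      LOG.info("Skipping ACL inheritance for " + file + ": " + e, e);
      return null;
    }
  }
}
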