diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 8e072f7..9363cc6 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1950,7 +1950,9 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
         " none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" +
         " column: implies column names can contain any character."
     ),
-
+    HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS("hive.support.sql11.reserved.keywords", true,
+        "This flag should be set to true to enable support for SQL2011 reserved keywords.\n" +
+        "The default value is true."),
     // role names are case-insensitive
     USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "", false,
         "Comma separated list of users who are in admin role for bootstrapping.\n" +
diff --git a/contrib/src/test/queries/clientnegative/serde_regex.q b/contrib/src/test/queries/clientnegative/serde_regex.q
index 7ad3142..a676338 100644
--- a/contrib/src/test/queries/clientnegative/serde_regex.q
+++ b/contrib/src/test/queries/clientnegative/serde_regex.q
@@ -7,7 +7,7 @@ EXPLAIN
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status INT,
@@ -24,7 +24,7 @@ STORED AS TEXTFILE;
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status INT,
diff --git a/contrib/src/test/queries/clientpositive/serde_regex.q b/contrib/src/test/queries/clientpositive/serde_regex.q
index 5c2d2ca..466f9a6 100644
--- a/contrib/src/test/queries/clientpositive/serde_regex.q
+++ b/contrib/src/test/queries/clientpositive/serde_regex.q
@@ -4,7 +4,7 @@ EXPLAIN
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status STRING,
@@ -21,7 +21,7 @@ STORED AS TEXTFILE;
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status STRING,
diff --git a/contrib/src/test/results/clientnegative/serde_regex.q.out b/contrib/src/test/results/clientnegative/serde_regex.q.out
index 988bf10..0f9b036 100644
--- a/contrib/src/test/results/clientnegative/serde_regex.q.out
+++ b/contrib/src/test/results/clientnegative/serde_regex.q.out
@@ -9,7 +9,7 @@ EXPLAIN
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status INT,
@@ -28,7 +28,7 @@ EXPLAIN
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status INT,
@@ -61,7 +61,7 @@ STAGE PLANS:
 PREHOOK: query: CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status INT,
diff --git a/contrib/src/test/results/clientpositive/serde_regex.q.out b/contrib/src/test/results/clientpositive/serde_regex.q.out
index dc97cb3..2984293 100644
--- a/contrib/src/test/results/clientpositive/serde_regex.q.out
+++ b/contrib/src/test/results/clientpositive/serde_regex.q.out
@@ -2,7 +2,7 @@ PREHOOK: query: EXPLAIN
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status STRING,
@@ -20,7 +20,7 @@ POSTHOOK: query: EXPLAIN
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status STRING,
@@ -53,7 +53,7 @@ STAGE PLANS:
 PREHOOK: query: CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status STRING,
@@ -72,7 +72,7 @@ PREHOOK: Output: default@serde_regex
 POSTHOOK: query: CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
-  user STRING,
+  `user` STRING,
   time STRING,
   request STRING,
   status STRING,
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java
index 6f45a59..9cc5c17 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationReads.java
@@ -60,7 +60,7 @@ private void readTableByOtherUser(String perm, boolean isSuccess) throws Excepti
     setPermissions(db.getLocationUri(), "-rwxrwxrwx");
     String dbDotTable = dbName + "." + tblName;
-    resp = driver.run("create table " + dbDotTable + "(i int) partitioned by (date string)");
+    resp = driver.run("create table " + dbDotTable + "(i int) partitioned by (`date` string)");
     Assert.assertEquals(0, resp.getResponseCode());
     Table tab = msc.getTable(dbName, tblName);
     setPermissions(tab.getSd().getLocation(), perm);
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
index 79cf58b..50fe7be 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
@@ -94,7 +94,7 @@ public static void beforeTest() throws Exception {
     SessionState.start(conf);
     driver = new Driver(conf);
     runCmd("create table " + tableName
-        + " (i int, j int, k string) partitioned by (city string, date string) ");
+        + " (i int, j int, k string) partitioned by (city string, `date` string) ");
     runCmd("create database " + dbName);
     // Need a separate table for ACID testing since it has to be bucketed and it has to be Acid
     runCmd("create table " + acidTableName + " (i int, j int) clustered by (i) into 2 buckets " +
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java
index c4dccba..ddb4730 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java
@@ -123,7 +123,7 @@ public static void beforeTest() throws Exception {
     SessionState.start(conf);
     driver = new Driver(conf);
     runCmd("create table " + tableName1
-        + " (i int, j int, k string) partitioned by (city string, date string) ");
+        + " (i int, j int, k string) partitioned by (city string, `date` string) ");
     runCmd("create table " + tableName2 + "(i int)");
     runCmd("create database " + dbName1);
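The q-file and itest updates above all follow from the new default: with hive.support.sql11.reserved.keywords left at true, SQL2011 reserved words such as user and date parse as column or partition names only when back-quoted. A minimal sketch of that behavior, reusing the Driver and SessionState pattern from the itests above (the table names are illustrative only, not from the patch):

    // Sketch, not part of the patch: effect of the new flag's default value.
    HiveConf conf = new HiveConf();            // hive.support.sql11.reserved.keywords defaults to true
    SessionState.start(conf);
    Driver driver = new Driver(conf);
    // Back-quoted reserved words always parse:
    driver.run("create table t1 (i int) partitioned by (`date` string)");
    // Bare reserved words now fail at parse time under the default setting:
    // driver.run("create table t2 (i int) partitioned by (date string)");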
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
index b72ee5d..038ed99 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
@@ -35,6 +35,9 @@ k=3;
       RecognitionException e) {
     gParent.errors.add(new ParseError(gParent, e, tokenNames));
   }
+  protected boolean useSQL11ReservedKeywordsForIdentifier() {
+    return gParent.useSQL11ReservedKeywordsForIdentifier();
+  }
 }
 
 @rulecatch {
@@ -126,7 +129,7 @@ lateralView
 @init {gParent.pushMsg("lateral view", state); }
 @after {gParent.popMsg(state); }
     :
-    KW_LATERAL KW_VIEW KW_OUTER function tableAlias (KW_AS identifier ((COMMA)=> COMMA identifier)*)?
+    (KW_LATERAL KW_VIEW KW_OUTER) => KW_LATERAL KW_VIEW KW_OUTER function tableAlias (KW_AS identifier ((COMMA)=> COMMA identifier)*)?
     -> ^(TOK_LATERAL_VIEW_OUTER ^(TOK_SELECT ^(TOK_SELEXPR function identifier* tableAlias)))
     |
     KW_LATERAL KW_VIEW function tableAlias (KW_AS identifier ((COMMA)=> COMMA identifier)*)?
@@ -177,7 +180,12 @@ tableSample
 tableSource
 @init { gParent.pushMsg("table source", state); }
 @after { gParent.popMsg(state); }
-    : tabname=tableName (props=tableProperties)? (ts=tableSample)? (KW_AS? alias=Identifier)?
+    : tabname=tableName
+    ((tableProperties) => props=tableProperties)?
+    ((tableSample) => ts=tableSample)?
+    ((KW_AS) => (KW_AS alias=Identifier)
+    |
+    (Identifier) => (alias=Identifier))?
     -> ^(TOK_TABREF $tabname $props? $ts? $alias?)
     ;
 
@@ -232,11 +240,11 @@ partitionedTableFunction
 @init { gParent.pushMsg("ptf clause", state); }
 @after { gParent.popMsg(state); }
    :
-   name=Identifier
-   LPAREN KW_ON ptfsrc=partitionTableFunctionSource partitioningSpec?
-     ((Identifier LPAREN expression RPAREN ) => Identifier LPAREN expression RPAREN ( COMMA Identifier LPAREN expression RPAREN)*)?
-   RPAREN alias=Identifier?
-   -> ^(TOK_PTBLFUNCTION $name $alias? partitionTableFunctionSource partitioningSpec? expression*)
+   name=Identifier LPAREN KW_ON
+   ((partitionTableFunctionSource) => (ptfsrc=partitionTableFunctionSource spec=partitioningSpec?))
+   ((Identifier LPAREN expression RPAREN ) => Identifier LPAREN expression RPAREN ( COMMA Identifier LPAREN expression RPAREN)*)?
+   ((RPAREN) => (RPAREN)) ((Identifier) => alias=Identifier)?
+   -> ^(TOK_PTBLFUNCTION $name $alias? $ptfsrc $spec? expression*)
    ;
 
 //----------------------- Rules for parsing whereClause -----------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 90b84ac..e7de6c8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -42,7 +42,6 @@ KW_TRUE : 'TRUE';
 KW_FALSE : 'FALSE';
 KW_ALL : 'ALL';
 KW_NONE: 'NONE';
-KW_DEFAULT : 'DEFAULT';
 KW_AND : 'AND';
 KW_OR : 'OR';
 KW_NOT : 'NOT' | '!';
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 72b852e..ed1fc78 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -354,6 +354,8 @@ package org.apache.hadoop.hive.ql.parse;
 
 import java.util.Collection;
 import java.util.HashMap;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
 }
 
@@ -370,7 +372,6 @@ import java.util.HashMap;
     xlateMap.put("KW_FALSE", "FALSE");
     xlateMap.put("KW_ALL", "ALL");
     xlateMap.put("KW_NONE", "NONE");
-    xlateMap.put("KW_DEFAULT", "DEFAULT");
     xlateMap.put("KW_AND", "AND");
     xlateMap.put("KW_OR", "OR");
     xlateMap.put("KW_NOT", "NOT");
@@ -620,6 +621,13 @@ import java.util.HashMap;
   private CommonTree throwSetOpException() throws RecognitionException {
     throw new FailedPredicateException(input, "orderByClause clusterByClause distributeByClause sortByClause limitClause can only be applied to the whole union.", "");
   }
+  private Configuration hiveConf;
+  public void setHiveConf(Configuration hiveConf) {
+    this.hiveConf = hiveConf;
+  }
+  protected boolean useSQL11ReservedKeywordsForIdentifier() {
+    return !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS);
+  }
 }
 
 @rulecatch {
@@ -712,8 +720,8 @@ ddlStatement
     | unlockDatabase
     | createRoleStatement
     | dropRoleStatement
-    | grantPrivileges
-    | revokePrivileges
+    | (grantPrivileges) => grantPrivileges
+    | (revokePrivileges) => revokePrivileges
     | showGrants
     | showRoleGrants
     | showRolePrincipals
@@ -955,8 +963,7 @@ alterStatement
 alterTableStatementSuffix
 @init { pushMsg("alter table statement", state); }
 @after { popMsg(state); }
-    : alterStatementSuffixRename[true]
-    | alterStatementSuffixUpdateStatsCol
+    : (alterStatementSuffixRename[true]) => alterStatementSuffixRename[true]
     | alterStatementSuffixDropPartitions[true]
     | alterStatementSuffixAddPartitions[true]
     | alterStatementSuffixTouch
@@ -1297,15 +1304,15 @@ fileFormat
 tabTypeExpr
 @init { pushMsg("specifying table types", state); }
 @after { popMsg(state); }
-
-   : identifier (DOT^ (KW_ELEM_TYPE | KW_KEY_TYPE | KW_VALUE_TYPE | identifier))*
-   ;
-
-descTabTypeExpr
-@init { pushMsg("specifying describe table types", state); }
-@after { popMsg(state); }
-
-   : identifier (DOT^ (KW_ELEM_TYPE | KW_KEY_TYPE | KW_VALUE_TYPE | identifier))* identifier?
+   : identifier (DOT^
+   (
+   (KW_ELEM_TYPE) => KW_ELEM_TYPE
+   |
+   (KW_KEY_TYPE) => KW_KEY_TYPE
+   |
+   (KW_VALUE_TYPE) => KW_VALUE_TYPE
+   | identifier
+   ))* identifier?
    ;
 
@@ -1314,21 +1321,22 @@ partTypeExpr
    : tabTypeExpr partitionSpec? -> ^(TOK_TABTYPE tabTypeExpr partitionSpec?)
    ;
 
-descPartTypeExpr
-@init { pushMsg("specifying describe table partitions", state); }
-@after { popMsg(state); }
-   : descTabTypeExpr partitionSpec? -> ^(TOK_TABTYPE descTabTypeExpr partitionSpec?)
-   ;
-
 descStatement
 @init { pushMsg("describe statement", state); }
 @after { popMsg(state); }
-    : (KW_DESCRIBE|KW_DESC) (KW_DATABASE|KW_SCHEMA) KW_EXTENDED? (dbName=identifier) -> ^(TOK_DESCDATABASE $dbName KW_EXTENDED?)
-    | (KW_DESCRIBE|KW_DESC) (descOptions=KW_FORMATTED|descOptions=KW_EXTENDED|descOptions=KW_PRETTY)? (parttype=descPartTypeExpr) -> ^(TOK_DESCTABLE $parttype $descOptions?)
-    | (KW_DESCRIBE|KW_DESC) KW_FUNCTION KW_EXTENDED? (name=descFuncNames) -> ^(TOK_DESCFUNCTION $name KW_EXTENDED?)
+    :
+    (KW_DESCRIBE|KW_DESC)
+    (
+    (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) KW_EXTENDED? (dbName=identifier) -> ^(TOK_DESCDATABASE $dbName KW_EXTENDED?)
+    |
+    (KW_FUNCTION) => KW_FUNCTION KW_EXTENDED? (name=descFuncNames) -> ^(TOK_DESCFUNCTION $name KW_EXTENDED?)
+    |
+    (KW_FORMATTED|KW_EXTENDED|KW_PRETTY) => ((descOptions=KW_FORMATTED|descOptions=KW_EXTENDED|descOptions=KW_PRETTY) parttype=partTypeExpr) -> ^(TOK_DESCTABLE $parttype $descOptions)
+    |
+    parttype=partTypeExpr -> ^(TOK_DESCTABLE $parttype)
+    )
     ;
-
 analyzeStatement
 @init { pushMsg("analyze statement", state); }
 @after { popMsg(state); }
@@ -1350,8 +1358,12 @@ showStatement
     | KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=identifier)? KW_LIKE showStmtIdentifier partitionSpec?
     -> ^(TOK_SHOW_TABLESTATUS showStmtIdentifier $db_name? partitionSpec?)
     | KW_SHOW KW_TBLPROPERTIES tableName (LPAREN prptyName=StringLiteral RPAREN)? -> ^(TOK_SHOW_TBLPROPERTIES tableName $prptyName?)
-    | KW_SHOW KW_LOCKS (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?)
-    | KW_SHOW KW_LOCKS (KW_DATABASE|KW_SCHEMA) (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?)
+    | KW_SHOW KW_LOCKS
+      (
+      (KW_DATABASE|KW_SCHEMA) => (KW_DATABASE|KW_SCHEMA) (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?)
+      |
+      (parttype=partTypeExpr)? (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWLOCKS $parttype? $isExtended?)
+      )
     | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)?
     -> ^(TOK_SHOWINDEXES showStmtIdentifier $showOptions? $db_name?)
     | KW_SHOW KW_COMPACTIONS -> ^(TOK_SHOW_COMPACTIONS)
@@ -1459,8 +1471,12 @@ showCurrentRole
 setRole
 @init {pushMsg("set role", state);}
 @after {popMsg(state);}
-    : KW_SET KW_ROLE roleName=identifier
-    -> ^(TOK_SHOW_SET_ROLE $roleName)
+    : KW_SET KW_ROLE
+    (
+    KW_ALL -> ^(TOK_SHOW_SET_ROLE KW_ALL)
+    |
+    identifier -> ^(TOK_SHOW_SET_ROLE identifier)
+    )
     ;
 
 showGrants
@@ -1720,7 +1736,7 @@ tableSkewed
 @init { pushMsg("table skewed specification", state); }
 @after { popMsg(state); }
     :
-     KW_SKEWED KW_BY LPAREN skewedCols=columnNameList RPAREN KW_ON LPAREN (skewedValues=skewedValueElement) RPAREN (storedAsDirs)?
+     KW_SKEWED KW_BY LPAREN skewedCols=columnNameList RPAREN KW_ON LPAREN (skewedValues=skewedValueElement) RPAREN ((storedAsDirs) => storedAsDirs)?
     -> ^(TOK_TABLESKEWED $skewedCols $skewedValues storedAsDirs?)
     ;
 
@@ -1851,7 +1867,7 @@ tableFileFormat
 @init { pushMsg("table file format specification", state); }
 @after { popMsg(state); }
     :
-      KW_STORED KW_AS KW_INPUTFORMAT inFmt=StringLiteral KW_OUTPUTFORMAT outFmt=StringLiteral (KW_INPUTDRIVER inDriver=StringLiteral KW_OUTPUTDRIVER outDriver=StringLiteral)?
+      (KW_STORED KW_AS KW_INPUTFORMAT) => KW_STORED KW_AS KW_INPUTFORMAT inFmt=StringLiteral KW_OUTPUTFORMAT outFmt=StringLiteral (KW_INPUTDRIVER inDriver=StringLiteral KW_OUTPUTDRIVER outDriver=StringLiteral)?
       -> ^(TOK_TABLEFILEFORMAT $inFmt $outFmt $inDriver? $outDriver?)
       | KW_STORED KW_BY storageHandler=StringLiteral
       (KW_WITH KW_SERDEPROPERTIES serdeprops=tableProperties)?
@@ -2231,7 +2247,7 @@ simpleSelectStatement
    whereClause?
    groupByClause?
    havingClause?
-   window_clause?
+   ((window_clause) => window_clause)?
    -> ^(TOK_QUERY fromClause? ^(TOK_INSERT ^(TOK_DESTINATION ^(TOK_DIR TOK_TMP_FILE)) selectClause whereClause? groupByClause? havingClause? window_clause?))
    ;
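The members block added to HiveParser.g above is the heart of the change: the generated parsers consult a single predicate that is just the negation of the new flag. A minimal sketch of that relationship (not part of the patch; plain HiveConf calls only):

    // Sketch: how the flag maps to the grammar predicate.
    HiveConf conf = new HiveConf();
    // Default: flag == true  ->  predicate == false  ->  keywords stay reserved.
    boolean reserved = HiveConf.getBoolVar(conf,
        HiveConf.ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS);   // true
    boolean predicate = !reserved;                                 // false
    // Flipping the flag restores the old behavior:
    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS, false);
    predicate = !HiveConf.getBoolVar(conf,
        HiveConf.ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS);   // true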
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index cabf971..978bbd5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -35,6 +35,9 @@ k=3;
       RecognitionException e) {
     gParent.errors.add(new ParseError(gParent, e, tokenNames));
   }
+  protected boolean useSQL11ReservedKeywordsForIdentifier() {
+    return gParent.useSQL11ReservedKeywordsForIdentifier();
+  }
 }
 
 @rulecatch {
@@ -51,40 +54,41 @@ groupByClause
 @after { gParent.popMsg(state); }
     :
     KW_GROUP KW_BY
-    groupByExpression
-    ( COMMA groupByExpression )*
+    expression
+    ( COMMA expression)*
     ((rollup=KW_WITH KW_ROLLUP) | (cube=KW_WITH KW_CUBE)) ?
     (sets=KW_GROUPING KW_SETS
     LPAREN groupingSetExpression ( COMMA groupingSetExpression)* RPAREN ) ?
-    -> {rollup != null}? ^(TOK_ROLLUP_GROUPBY groupByExpression+)
-    -> {cube != null}? ^(TOK_CUBE_GROUPBY groupByExpression+)
-    -> {sets != null}? ^(TOK_GROUPING_SETS groupByExpression+ groupingSetExpression+)
-    -> ^(TOK_GROUPBY groupByExpression+)
+    -> {rollup != null}? ^(TOK_ROLLUP_GROUPBY expression+)
+    -> {cube != null}? ^(TOK_CUBE_GROUPBY expression+)
+    -> {sets != null}? ^(TOK_GROUPING_SETS expression+ groupingSetExpression+)
+    -> ^(TOK_GROUPBY expression+)
     ;
 
 groupingSetExpression
 @init {gParent.pushMsg("grouping set expression", state); }
 @after {gParent.popMsg(state); }
    :
-   groupByExpression
-   -> ^(TOK_GROUPING_SETS_EXPRESSION groupByExpression)
+   (LPAREN) => groupingSetExpressionMultiple
    |
+   groupingExpressionSingle
+   ;
+
+groupingSetExpressionMultiple
+@init {gParent.pushMsg("grouping set part expression", state); }
+@after {gParent.popMsg(state); }
+   :
    LPAREN
-   groupByExpression (COMMA groupByExpression)*
+   expression? (COMMA expression)*
    RPAREN
-   -> ^(TOK_GROUPING_SETS_EXPRESSION groupByExpression+)
-   |
-   LPAREN
-   RPAREN
-   -> ^(TOK_GROUPING_SETS_EXPRESSION)
+   -> ^(TOK_GROUPING_SETS_EXPRESSION expression*)
    ;
-
-groupByExpression
-@init { gParent.pushMsg("group by expression", state); }
+
+groupingExpressionSingle
+@init { gParent.pushMsg("groupingExpression expression", state); }
 @after { gParent.popMsg(state); }
     :
-    expression
+    expression -> ^(TOK_GROUPING_SETS_EXPRESSION expression)
     ;
 
 havingClause
@@ -101,6 +105,26 @@ havingCondition
     expression
     ;
 
+expressionsInParenthese
+    :
+    LPAREN expression (COMMA expression)* RPAREN -> expression+
+    ;
+
+expressionsNotInParenthese
+    :
+    expression (COMMA expression)* -> expression+
+    ;
+
+columnRefOrderInParenthese
+    :
+    LPAREN columnRefOrder (COMMA columnRefOrder)* RPAREN -> columnRefOrder+
+    ;
+
+columnRefOrderNotInParenthese
+    :
+    columnRefOrder (COMMA columnRefOrder)* -> columnRefOrder+
+    ;
+
 // order by a,b
 orderByClause
 @init { gParent.pushMsg("order by clause", state); }
@@ -108,17 +132,17 @@ orderByClause
     :
     KW_ORDER KW_BY columnRefOrder ( COMMA columnRefOrder)* -> ^(TOK_ORDERBY columnRefOrder+)
     ;
-
+
 clusterByClause
 @init { gParent.pushMsg("cluster by clause", state); }
 @after { gParent.popMsg(state); }
     :
     KW_CLUSTER KW_BY
-    LPAREN expression (COMMA expression)* RPAREN -> ^(TOK_CLUSTERBY expression+)
+    (
+    (LPAREN) => expressionsInParenthese -> ^(TOK_CLUSTERBY expressionsInParenthese)
     |
-    KW_CLUSTER KW_BY
-    expression
-    ( (COMMA)=>COMMA expression )* -> ^(TOK_CLUSTERBY expression+)
+    expressionsNotInParenthese -> ^(TOK_CLUSTERBY expressionsNotInParenthese)
+    )
     ;
 
 partitionByClause
@@ -126,10 +150,11 @@ partitionByClause
 @after { gParent.popMsg(state); }
     :
     KW_PARTITION KW_BY
-    LPAREN expression (COMMA expression)* RPAREN -> ^(TOK_DISTRIBUTEBY expression+)
+    (
+    (LPAREN) => expressionsInParenthese -> ^(TOK_DISTRIBUTEBY expressionsInParenthese)
     |
-    KW_PARTITION KW_BY
-    expression ((COMMA)=> COMMA expression)* -> ^(TOK_DISTRIBUTEBY expression+)
+    expressionsNotInParenthese -> ^(TOK_DISTRIBUTEBY expressionsNotInParenthese)
+    )
     ;
 
 distributeByClause
@@ -137,10 +162,11 @@ distributeByClause
 @after { gParent.popMsg(state); }
     :
     KW_DISTRIBUTE KW_BY
-    LPAREN expression (COMMA expression)* RPAREN -> ^(TOK_DISTRIBUTEBY expression+)
+    (
+    (LPAREN) => expressionsInParenthese -> ^(TOK_DISTRIBUTEBY expressionsInParenthese)
     |
-    KW_DISTRIBUTE KW_BY
-    expression ((COMMA)=> COMMA expression)* -> ^(TOK_DISTRIBUTEBY expression+)
+    expressionsNotInParenthese -> ^(TOK_DISTRIBUTEBY expressionsNotInParenthese)
+    )
     ;
 
 sortByClause
@@ -148,12 +174,11 @@ sortByClause
 @after { gParent.popMsg(state); }
     :
     KW_SORT KW_BY
-    LPAREN columnRefOrder
-    ( COMMA columnRefOrder)* RPAREN -> ^(TOK_SORTBY columnRefOrder+)
+    (
+    (LPAREN) => columnRefOrderInParenthese -> ^(TOK_SORTBY columnRefOrderInParenthese)
     |
-    KW_SORT KW_BY
-    columnRefOrder
-    ( (COMMA)=> COMMA columnRefOrder)* -> ^(TOK_SORTBY columnRefOrder+)
+    columnRefOrderNotInParenthese -> ^(TOK_SORTBY columnRefOrderNotInParenthese)
+    )
     ;
 
 // fun(par1, par2, par3)
 function
@@ -164,7 +189,7 @@ function
     functionName
     LPAREN
       (
-        (star=STAR)
+        (STAR) => (star=STAR)
        | (dist=KW_DISTINCT)? (selectExpression (COMMA selectExpression)*)?
       )
     RPAREN (KW_OVER ws=window_specification)?
@@ -173,29 +198,15 @@ function
           -> ^(TOK_FUNCTIONDI functionName (selectExpression+)?)
     ;
 
-nonParenthesizedFunction
-@init { gParent.pushMsg("non-parenthesized function name", state); }
-@after { gParent.popMsg(state); }
-    :
-    nonParenthesizedFunctionName
-    -> ^(TOK_FUNCTION nonParenthesizedFunctionName)
-    ;
-
-nonParenthesizedFunctionName
-@init { gParent.pushMsg("non-parenthesized function name", state); }
-@after { gParent.popMsg(state); }
-    :
-    KW_CURRENT_DATE | KW_CURRENT_TIMESTAMP
-    ;
-
 functionName
 @init { gParent.pushMsg("function name", state); }
 @after { gParent.popMsg(state); }
     : // Keyword IF is also a function name
-    KW_IF | KW_ARRAY | KW_MAP | KW_STRUCT | KW_UNIONTYPE | functionIdentifier
+    (KW_IF | KW_ARRAY | KW_MAP | KW_STRUCT | KW_UNIONTYPE) => (KW_IF | KW_ARRAY | KW_MAP | KW_STRUCT | KW_UNIONTYPE)
+    |
+    (functionIdentifier) => functionIdentifier
     |
-    // This allows current_timestamp() to work as well as current_timestamp
-    nonParenthesizedFunctionName
+    sql11ReservedKeywordsUsedAsCastFunctionName -> Identifier[$sql11ReservedKeywordsUsedAsCastFunctionName.text]
     ;
 
 castExpression
@@ -267,6 +278,8 @@ dateLiteral
       // This makes the dateLiteral more consistent with the other type literals.
       adaptor.create(TOK_DATELITERAL, $StringLiteral.text)
     }
+    |
+    KW_CURRENT_DATE -> ^(TOK_FUNCTION KW_CURRENT_DATE)
     ;
 
 timestampLiteral
@@ -275,6 +288,8 @@ timestampLiteral
     {
      adaptor.create(TOK_TIMESTAMPLITERAL, $StringLiteral.text)
     }
+    |
+    KW_CURRENT_TIMESTAMP -> ^(TOK_FUNCTION KW_CURRENT_TIMESTAMP)
     ;
 
 expression
@@ -287,11 +302,10 @@ expression
 atomExpression
     :
     KW_NULL -> TOK_NULL
-    | constant
+    | (constant) => constant
     | castExpression
     | caseExpression
     | whenExpression
-    | nonParenthesizedFunction
     | (functionName LPAREN) => function
     | tableOrColumn
     | LPAREN! expression RPAREN!
@@ -543,7 +557,7 @@ sysFuncNames
 descFuncNames
     :
-      sysFuncNames
+      (sysFuncNames) => sysFuncNames
     | StringLiteral
     | functionIdentifier
     ;
@@ -552,6 +566,9 @@ identifier
     :
     Identifier
     | nonReserved -> Identifier[$nonReserved.text]
+    // If SQL11 reserved keywords are supported (i.e., useSQL11ReservedKeywordsForIdentifier() returns false),
+    // the SQL11 keywords used as identifiers in existing q tests will NOT be accepted through this alternative.
+    | {useSQL11ReservedKeywordsForIdentifier()}? sql11ReservedKeywordsUsedAsIdentifier -> Identifier[$sql11ReservedKeywordsUsedAsIdentifier.text]
    ;
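The dateLiteral and timestampLiteral alternatives above turn CURRENT_DATE and CURRENT_TIMESTAMP into ordinary keyword-driven literals, and sql11ReservedKeywordsUsedAsCastFunctionName lets type keywords act as cast-style function names. A short sketch of statements the revised rules are meant to accept, reusing the driver from the earlier sketch (src is a hypothetical table; whether a matching UDF resolves at runtime is a separate question):

    // Sketch only: statements the revised IdentifiersParser rules accept at parse time.
    driver.run("select current_date, current_timestamp from src");
    // DATE works as a cast-style function name via sql11ReservedKeywordsUsedAsCastFunctionName:
    driver.run("select date('2015-03-17') from src");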
 
 functionIdentifier
@@ -572,5 +589,40 @@ principalIdentifier
 
 nonReserved
     :
-    KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS | KW_REWRITE | KW_AUTHORIZATION | KW_VALUES | KW_URI | KW_SERVER | KW_RELOAD
+    KW_ADD | KW_ADMIN | KW_AFTER | KW_ANALYZE | KW_ARCHIVE | KW_ASC | KW_BEFORE | KW_BUCKET | KW_BUCKETS
+    | KW_CASCADE | KW_CHANGE | KW_CLUSTER | KW_CLUSTERED | KW_CLUSTERSTATUS | KW_COLLECTION | KW_COLUMNS
+    | KW_COMMENT | KW_COMPACT | KW_COMPACTIONS | KW_COMPUTE | KW_CONCATENATE | KW_CONTINUE | KW_CURRENT_DATE
+    | KW_CURRENT_TIMESTAMP | KW_DATA | KW_DATABASES | KW_DATETIME | KW_DBPROPERTIES | KW_DEFERRED | KW_DEFINED
+    | KW_DELIMITED | KW_DEPENDENCY | KW_DESC | KW_DIRECTORIES | KW_DIRECTORY | KW_DISABLE | KW_DISTRIBUTE
+    | KW_ELEM_TYPE | KW_ENABLE | KW_ESCAPED | KW_EXCLUSIVE | KW_EXPLAIN | KW_EXPORT | KW_FIELDS | KW_FILE | KW_FILEFORMAT
+    | KW_FIRST | KW_FORMAT | KW_FORMATTED | KW_FUNCTIONS | KW_HOLD_DDLTIME | KW_IDXPROPERTIES | KW_IGNORE
+    | KW_INDEX | KW_INDEXES | KW_INNER | KW_INPATH | KW_INPUTDRIVER | KW_INPUTFORMAT | KW_ITEMS | KW_JAR
+    | KW_KEYS | KW_KEY_TYPE | KW_LIMIT | KW_LINES | KW_LOAD | KW_LOCATION | KW_LOCK | KW_LOCKS | KW_LOGICAL | KW_LONG
+    | KW_MAPJOIN | KW_MATERIALIZED | KW_MINUS | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_OFFLINE | KW_OPTION
+    | KW_OUTPUTDRIVER | KW_OUTPUTFORMAT | KW_OVERWRITE | KW_OWNER | KW_PARTITIONED | KW_PARTITIONS | KW_PLUS | KW_PRETTY | KW_PRINCIPALS
+    | KW_PROTECTION | KW_PURGE | KW_READ | KW_READONLY | KW_REBUILD | KW_RECORDREADER | KW_RECORDWRITER
+    | KW_REGEXP | KW_RELOAD | KW_RENAME | KW_REPAIR | KW_REPLACE | KW_RESTRICT | KW_REWRITE | KW_RLIKE
+    | KW_ROLE | KW_ROLES | KW_SCHEMA | KW_SCHEMAS | KW_SEMI | KW_SERDE | KW_SERDEPROPERTIES | KW_SERVER | KW_SETS | KW_SHARED
+    | KW_SHOW | KW_SHOW_DATABASE | KW_SKEWED | KW_SORT | KW_SORTED | KW_SSL | KW_STATISTICS | KW_STORED
+    | KW_STREAMTABLE | KW_STRING | KW_STRUCT | KW_TABLES | KW_TBLPROPERTIES | KW_TEMPORARY | KW_TERMINATED
+    | KW_TINYINT | KW_TOUCH | KW_TRANSACTIONS | KW_UNARCHIVE | KW_UNDO | KW_UNIONTYPE | KW_UNLOCK | KW_UNSET
+    | KW_UNSIGNED | KW_URI | KW_USE | KW_UTC | KW_UTCTIMESTAMP | KW_VALUE_TYPE | KW_VIEW | KW_WHILE
+    ;
+
+sql11ReservedKeywordsUsedAsCastFunctionName
+    :
+    KW_BIGINT | KW_BINARY | KW_BOOLEAN | KW_DATE | KW_DOUBLE | KW_FLOAT | KW_INT | KW_SMALLINT | KW_TIMESTAMP
+    ;
+
+//The following SQL2011 reserved keywords are used as identifiers in many q tests; they may be added back for backward compatibility.
+sql11ReservedKeywordsUsedAsIdentifier
+    :
+    KW_ALL | KW_ALTER | KW_ARRAY | KW_AS | KW_AUTHORIZATION | KW_BETWEEN | KW_BIGINT | KW_BINARY | KW_BOOLEAN
+    | KW_BOTH | KW_BY | KW_CREATE | KW_CUBE | KW_CURSOR | KW_DATE | KW_DECIMAL | KW_DELETE | KW_DESCRIBE
+    | KW_DOUBLE | KW_DROP | KW_EXISTS | KW_EXTERNAL | KW_FALSE | KW_FETCH | KW_FLOAT | KW_FOR | KW_FULL | KW_GRANT
+    | KW_GROUP | KW_GROUPING | KW_IMPORT | KW_IN | KW_INSERT | KW_INT | KW_INTERSECT | KW_INTO | KW_IS | KW_LATERAL
+    | KW_LEFT | KW_LIKE | KW_LOCAL | KW_NONE | KW_NULL | KW_OF | KW_ORDER | KW_OUT | KW_OUTER | KW_PARTITION
+    | KW_PERCENT | KW_PROCEDURE | KW_RANGE | KW_READS | KW_REVOKE | KW_RIGHT
+    | KW_ROLLUP | KW_ROW | KW_ROWS | KW_SET | KW_SMALLINT | KW_TABLE | KW_TIMESTAMP | KW_TO | KW_TRIGGER | KW_TRUE
+    | KW_TRUNCATE | KW_UNION | KW_UPDATE | KW_USER | KW_USING | KW_VALUES | KW_WITH
     ;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
index a24cad9..debd5ac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
@@ -193,6 +193,9 @@ public ASTNode parse(String command, Context ctx, boolean setTokenRewriteStream)
       lexer.setHiveConf(ctx.getConf());
     }
     HiveParser parser = new HiveParser(tokens);
+    if (ctx != null) {
+      parser.setHiveConf(ctx.getConf());
+    }
     parser.setTreeAdaptor(adaptor);
     HiveParser.statement_return r = null;
     try {
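With the ParseDriver wiring above, callers opt back into the old behavior by parsing with a Context whose HiveConf has the flag turned off. A usage sketch (it mirrors the scaffolding of the new positive tests further below; Context construction may throw IOException):

    // Sketch: parsing with SQL11 reserved keywords allowed as identifiers.
    HiveConf conf = new HiveConf();
    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS, false);
    Context ctx = new Context(conf);
    ParseDriver pd = new ParseDriver();
    ASTNode tree = pd.parse("CREATE TABLE date (col STRING)", ctx);  // parses only with the flag off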
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
index eba3689..1dcf392 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
@@ -35,6 +35,9 @@ k=3;
       RecognitionException e) {
     gParent.errors.add(new ParseError(gParent, e, tokenNames));
   }
+  protected boolean useSQL11ReservedKeywordsForIdentifier() {
+    return gParent.useSQL11ReservedKeywordsForIdentifier();
+  }
 }
 
 @rulecatch {
@@ -125,10 +128,11 @@ selectItem
 @init { gParent.pushMsg("selection target", state); }
 @after { gParent.popMsg(state); }
    :
+   (tableAllColumns) => tableAllColumns -> ^(TOK_SELEXPR tableAllColumns)
+   |
    ( expression
      ((KW_AS? identifier) | (KW_AS LPAREN identifier (COMMA identifier)* RPAREN))?
    ) -> ^(TOK_SELEXPR expression identifier*)
-   | tableAllColumns -> ^(TOK_SELEXPR tableAllColumns)
    ;
 
 trfmClause
@@ -148,7 +152,9 @@ selectExpression
 @init { gParent.pushMsg("select expression", state); }
 @after { gParent.popMsg(state); }
    :
-   expression | tableAllColumns
+   (tableAllColumns) => tableAllColumns
+   |
+   expression
    ;
 
 selectExpressionList
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
index eed162b..52eb461 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java
@@ -147,11 +147,11 @@ public void testStandardInsertIntoTable() throws ParseException {
   @Test
   public void testSelectStarFromAnonymousVirtTable1Row() throws ParseException {
     try {
-      parse("select * from values (3,4)");
-      Assert.assertFalse("Expected ParseException", true);
+      parse("select * from `values` (3,4)");
+      Assert.assertFalse("Expected Exception", true);
     }
-    catch(ParseException ex) {
-      Assert.assertEquals("Failure didn't match.", "line 1:21 missing EOF at '(' near 'values'",ex.getMessage());
+    catch(Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
     }
   }
   @Test
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSQL11ReservedKeyWordsNegative.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSQL11ReservedKeyWordsNegative.java
new file mode 100644
index 0000000..3abe03b
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestSQL11ReservedKeyWordsNegative.java
@@ -0,0 +1,764 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.parse;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Parser tests for SQL11 reserved keywords. See HIVE-6617 for more
+ * information. Total number of reserved keywords covered: 71.
+ */
+public class TestSQL11ReservedKeyWordsNegative {
+  private static HiveConf conf;
+
+  private ParseDriver pd;
+  private SemanticAnalyzer sA;
+
+  @BeforeClass
+  public static void initialize() {
+    conf = new HiveConf(SemanticAnalyzer.class);
+    SessionState.start(conf);
+  }
+
+  @Before
+  public void setup() throws SemanticException {
+    pd = new ParseDriver();
+    sA = new CalcitePlanner(conf);
+  }
+
+  ASTNode parse(String query) throws ParseException {
+    ASTNode nd = pd.parse(query);
+    return (ASTNode) nd.getChild(0);
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ALL() throws ParseException {
+    try {
+      parse("CREATE TABLE ALL (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ALTER() throws ParseException {
+    try {
+      parse("CREATE TABLE ALTER (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ARRAY() throws ParseException {
+    try {
+      parse("CREATE TABLE ARRAY (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_AS() throws ParseException {
+    try {
+      parse("CREATE TABLE AS (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_AUTHORIZATION() throws ParseException {
+    try {
+      parse("CREATE TABLE AUTHORIZATION (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BETWEEN() throws ParseException {
+    try {
+      parse("CREATE TABLE BETWEEN (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BIGINT() throws ParseException {
+    try {
+      parse("CREATE TABLE BIGINT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BINARY() throws ParseException {
+    try {
+      parse("CREATE TABLE BINARY (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BOOLEAN() throws ParseException {
+    try {
+      parse("CREATE TABLE BOOLEAN (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BOTH() throws ParseException {
+    try {
+      parse("CREATE TABLE BOTH (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_BY() throws ParseException {
+    try {
+      parse("CREATE TABLE BY (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_CREATE() throws ParseException {
+    try {
+      parse("CREATE TABLE CREATE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_CUBE() throws ParseException {
+    try {
+      parse("CREATE TABLE CUBE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_CURSOR() throws ParseException {
+    try {
+      parse("CREATE TABLE CURSOR (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DATE() throws ParseException {
+    try {
+      parse("CREATE TABLE DATE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DECIMAL() throws ParseException {
+    try {
+      parse("CREATE TABLE DECIMAL (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DELETE() throws ParseException {
+    try {
+      parse("CREATE TABLE DELETE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DESCRIBE() throws ParseException {
+    try {
+      parse("CREATE TABLE DESCRIBE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DOUBLE() throws ParseException {
+    try {
+      parse("CREATE TABLE DOUBLE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_DROP() throws ParseException {
+    try {
+      parse("CREATE TABLE DROP (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_EXISTS() throws ParseException {
+    try {
+      parse("CREATE TABLE EXISTS (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_EXTERNAL() throws ParseException {
+    try {
+      parse("CREATE TABLE EXTERNAL (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_FALSE() throws ParseException {
+    try {
+      parse("CREATE TABLE FALSE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_FETCH() throws ParseException {
+    try {
+      parse("CREATE TABLE FETCH (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_FLOAT() throws ParseException {
+    try {
+      parse("CREATE TABLE FLOAT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_FOR() throws ParseException {
+    try {
+      parse("CREATE TABLE FOR (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_FULL() throws ParseException {
+    try {
+      parse("CREATE TABLE FULL (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_GRANT() throws ParseException {
+    try {
+      parse("CREATE TABLE GRANT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_GROUP() throws ParseException {
+    try {
+      parse("CREATE TABLE GROUP (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_GROUPING() throws ParseException {
+    try {
+      parse("CREATE TABLE GROUPING (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_IMPORT() throws ParseException {
+    try {
+      parse("CREATE TABLE IMPORT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_IN() throws ParseException {
+    try {
+      parse("CREATE TABLE IN (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_INSERT() throws ParseException {
+    try {
+      parse("CREATE TABLE INSERT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_INT() throws ParseException {
+    try {
+      parse("CREATE TABLE INT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_INTERSECT() throws ParseException {
+    try {
+      parse("CREATE TABLE INTERSECT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_INTO() throws ParseException {
+    try {
+      parse("CREATE TABLE INTO (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_IS() throws ParseException {
+    try {
+      parse("CREATE TABLE IS (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_LATERAL() throws ParseException {
+    try {
+      parse("CREATE TABLE LATERAL (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_LEFT() throws ParseException {
+    try {
+      parse("CREATE TABLE LEFT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_LIKE() throws ParseException {
+    try {
+      parse("CREATE TABLE LIKE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_LOCAL() throws ParseException {
+    try {
+      parse("CREATE TABLE LOCAL (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_NONE() throws ParseException {
+    try {
+      parse("CREATE TABLE NONE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_NULL() throws ParseException {
+    try {
+      parse("CREATE TABLE NULL (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_OF() throws ParseException {
+    try {
+      parse("CREATE TABLE OF (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ORDER() throws ParseException {
+    try {
+      parse("CREATE TABLE ORDER (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_OUT() throws ParseException {
+    try {
+      parse("CREATE TABLE OUT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_OUTER() throws ParseException {
+    try {
+      parse("CREATE TABLE OUTER (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_PARTITION() throws ParseException {
+    try {
+      parse("CREATE TABLE PARTITION (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_PERCENT() throws ParseException {
+    try {
+      parse("CREATE TABLE PERCENT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_PROCEDURE() throws ParseException {
+    try {
+      parse("CREATE TABLE PROCEDURE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_RANGE() throws ParseException {
+    try {
+      parse("CREATE TABLE RANGE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_READS() throws ParseException {
+    try {
+      parse("CREATE TABLE READS (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_REVOKE() throws ParseException {
+    try {
+      parse("CREATE TABLE REVOKE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_RIGHT() throws ParseException {
+    try {
+      parse("CREATE TABLE RIGHT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ROLLUP() throws ParseException {
+    try {
+      parse("CREATE TABLE ROLLUP (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ROW() throws ParseException {
+    try {
+      parse("CREATE TABLE ROW (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_ROWS() throws ParseException {
+    try {
+      parse("CREATE TABLE ROWS (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_SET() throws ParseException {
+    try {
+      parse("CREATE TABLE SET (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_SMALLINT() throws ParseException {
+    try {
+      parse("CREATE TABLE SMALLINT (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TABLE() throws ParseException {
+    try {
+      parse("CREATE TABLE TABLE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TIMESTAMP() throws ParseException {
+    try {
+      parse("CREATE TABLE TIMESTAMP (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TO() throws ParseException {
+    try {
+      parse("CREATE TABLE TO (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TRIGGER() throws ParseException {
+    try {
+      parse("CREATE TABLE TRIGGER (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TRUE() throws ParseException {
+    try {
+      parse("CREATE TABLE TRUE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_TRUNCATE() throws ParseException {
+    try {
+      parse("CREATE TABLE TRUNCATE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_UNION() throws ParseException {
+    try {
+      parse("CREATE TABLE UNION (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_UPDATE() throws ParseException {
+    try {
+      parse("CREATE TABLE UPDATE (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_USER() throws ParseException {
+    try {
+      parse("CREATE TABLE USER (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_USING() throws ParseException {
+    try {
+      parse("CREATE TABLE USING (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_VALUES() throws ParseException {
+    try {
+      parse("CREATE TABLE VALUES (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testSQL11ReservedKeyWords_WITH() throws ParseException {
+    try {
+      parse("CREATE TABLE WITH (col STRING)");
+      Assert.assertFalse("Expected ParseException", true);
+    } catch (Exception ex) {
+      Assert.assertEquals("Failure didn't match.", null, ex.getMessage());
+    }
+  }
+
+}
Total number : 71 + */ +public class TestSQL11ReservedKeyWordsPositive { + private static HiveConf conf; + + private ParseDriver pd; + private SemanticAnalyzer sA; + private Context ctx; + + @BeforeClass + public static void initialize() { + conf = new HiveConf(SemanticAnalyzer.class); + conf.setBoolVar(ConfVars.HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS, false); + SessionState.start(conf); + } + + @Before + public void setup() throws SemanticException, IOException { + pd = new ParseDriver(); + sA = new CalcitePlanner(conf); + ctx = new Context(conf); + } + + ASTNode parse(String query) throws ParseException { + ASTNode nd = pd.parse(query, ctx); + return (ASTNode) nd.getChild(0); + } + + @Test + public void testSQL11ReservedKeyWords_ALL() throws ParseException { + ASTNode ast = parse("CREATE TABLE ALL (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME ALL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_ALTER() throws ParseException { + ASTNode ast = parse("CREATE TABLE ALTER (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME ALTER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_ARRAY() throws ParseException { + ASTNode ast = parse("CREATE TABLE ARRAY (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME ARRAY) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_AS() throws ParseException { + ASTNode ast = parse("CREATE TABLE AS (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME AS) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_AUTHORIZATION() throws ParseException { + ASTNode ast = parse("CREATE TABLE AUTHORIZATION (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME AUTHORIZATION) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_BETWEEN() throws ParseException { + ASTNode ast = parse("CREATE TABLE BETWEEN (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME BETWEEN) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_BIGINT() throws ParseException { + ASTNode ast = parse("CREATE TABLE BIGINT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME BIGINT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_BINARY() throws ParseException { + ASTNode ast = parse("CREATE TABLE BINARY (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME BINARY) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_BOOLEAN() throws ParseException { + ASTNode ast = parse("CREATE TABLE BOOLEAN (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME BOOLEAN) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + 
ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_BOTH() throws ParseException { + ASTNode ast = parse("CREATE TABLE BOTH (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME BOTH) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_BY() throws ParseException { + ASTNode ast = parse("CREATE TABLE BY (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME BY) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_CREATE() throws ParseException { + ASTNode ast = parse("CREATE TABLE CREATE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME CREATE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_CUBE() throws ParseException { + ASTNode ast = parse("CREATE TABLE CUBE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME CUBE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_CURSOR() throws ParseException { + ASTNode ast = parse("CREATE TABLE CURSOR (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME CURSOR) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_DATE() throws ParseException { + ASTNode ast = parse("CREATE TABLE DATE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME DATE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_DECIMAL() throws ParseException { + ASTNode ast = parse("CREATE TABLE DECIMAL (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME DECIMAL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_DELETE() throws ParseException { + ASTNode ast = parse("CREATE TABLE DELETE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME DELETE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_DESCRIBE() throws ParseException { + ASTNode ast = parse("CREATE TABLE DESCRIBE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME DESCRIBE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_DOUBLE() throws ParseException { + ASTNode ast = parse("CREATE TABLE DOUBLE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME DOUBLE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_DROP() throws ParseException { + ASTNode ast = parse("CREATE TABLE DROP (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME DROP) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void 
testSQL11ReservedKeyWords_EXISTS() throws ParseException { + ASTNode ast = parse("CREATE TABLE EXISTS (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME EXISTS) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_EXTERNAL() throws ParseException { + ASTNode ast = parse("CREATE TABLE EXTERNAL (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME EXTERNAL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_FALSE() throws ParseException { + ASTNode ast = parse("CREATE TABLE FALSE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME FALSE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_FETCH() throws ParseException { + ASTNode ast = parse("CREATE TABLE FETCH (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME FETCH) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_FLOAT() throws ParseException { + ASTNode ast = parse("CREATE TABLE FLOAT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME FLOAT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_FOR() throws ParseException { + ASTNode ast = parse("CREATE TABLE FOR (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME FOR) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_FULL() throws ParseException { + ASTNode ast = parse("CREATE TABLE FULL (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME FULL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_GRANT() throws ParseException { + ASTNode ast = parse("CREATE TABLE GRANT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME GRANT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_GROUP() throws ParseException { + ASTNode ast = parse("CREATE TABLE GROUP (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME GROUP) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_GROUPING() throws ParseException { + ASTNode ast = parse("CREATE TABLE GROUPING (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME GROUPING) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_IMPORT() throws ParseException { + ASTNode ast = parse("CREATE TABLE IMPORT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME IMPORT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_IN() throws 
ParseException { + ASTNode ast = parse("CREATE TABLE IN (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME IN) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_INSERT() throws ParseException { + ASTNode ast = parse("CREATE TABLE INSERT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME INSERT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_INT() throws ParseException { + ASTNode ast = parse("CREATE TABLE INT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME INT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_INTERSECT() throws ParseException { + ASTNode ast = parse("CREATE TABLE INTERSECT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME INTERSECT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_INTO() throws ParseException { + ASTNode ast = parse("CREATE TABLE INTO (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME INTO) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_IS() throws ParseException { + ASTNode ast = parse("CREATE TABLE IS (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME IS) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_LATERAL() throws ParseException { + ASTNode ast = parse("CREATE TABLE LATERAL (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME LATERAL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_LEFT() throws ParseException { + ASTNode ast = parse("CREATE TABLE LEFT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME LEFT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_LIKE() throws ParseException { + ASTNode ast = parse("CREATE TABLE LIKE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME LIKE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_LOCAL() throws ParseException { + ASTNode ast = parse("CREATE TABLE LOCAL (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME LOCAL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_NONE() throws ParseException { + ASTNode ast = parse("CREATE TABLE NONE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME NONE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_NULL() throws ParseException { + ASTNode ast = parse("CREATE TABLE NULL (col 
STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME NULL) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_OF() throws ParseException { + ASTNode ast = parse("CREATE TABLE OF (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME OF) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_ORDER() throws ParseException { + ASTNode ast = parse("CREATE TABLE ORDER (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME ORDER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_OUT() throws ParseException { + ASTNode ast = parse("CREATE TABLE OUT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME OUT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_OUTER() throws ParseException { + ASTNode ast = parse("CREATE TABLE OUTER (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME OUTER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_PARTITION() throws ParseException { + ASTNode ast = parse("CREATE TABLE PARTITION (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME PARTITION) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_PERCENT() throws ParseException { + ASTNode ast = parse("CREATE TABLE PERCENT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME PERCENT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_PROCEDURE() throws ParseException { + ASTNode ast = parse("CREATE TABLE PROCEDURE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME PROCEDURE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_RANGE() throws ParseException { + ASTNode ast = parse("CREATE TABLE RANGE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME RANGE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_READS() throws ParseException { + ASTNode ast = parse("CREATE TABLE READS (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME READS) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_REVOKE() throws ParseException { + ASTNode ast = parse("CREATE TABLE REVOKE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME REVOKE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_RIGHT() throws ParseException { + ASTNode ast = parse("CREATE TABLE RIGHT (col STRING)"); + Assert + .assertEquals( + 
"AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME RIGHT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_ROLLUP() throws ParseException { + ASTNode ast = parse("CREATE TABLE ROLLUP (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME ROLLUP) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_ROW() throws ParseException { + ASTNode ast = parse("CREATE TABLE ROW (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME ROW) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_ROWS() throws ParseException { + ASTNode ast = parse("CREATE TABLE ROWS (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME ROWS) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_SET() throws ParseException { + ASTNode ast = parse("CREATE TABLE SET (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME SET) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_SMALLINT() throws ParseException { + ASTNode ast = parse("CREATE TABLE SMALLINT (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME SMALLINT) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_TABLE() throws ParseException { + ASTNode ast = parse("CREATE TABLE TABLE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME TABLE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_TIMESTAMP() throws ParseException { + ASTNode ast = parse("CREATE TABLE TIMESTAMP (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME TIMESTAMP) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_TO() throws ParseException { + ASTNode ast = parse("CREATE TABLE TO (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME TO) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_TRIGGER() throws ParseException { + ASTNode ast = parse("CREATE TABLE TRIGGER (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME TRIGGER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_TRUE() throws ParseException { + ASTNode ast = parse("CREATE TABLE TRUE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME TRUE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_TRUNCATE() throws ParseException { + ASTNode ast = parse("CREATE TABLE TRUNCATE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE 
(TOK_TABNAME TRUNCATE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_UNION() throws ParseException { + ASTNode ast = parse("CREATE TABLE UNION (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME UNION) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_UPDATE() throws ParseException { + ASTNode ast = parse("CREATE TABLE UPDATE (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME UPDATE) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_USER() throws ParseException { + ASTNode ast = parse("CREATE TABLE USER (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME USER) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_USING() throws ParseException { + ASTNode ast = parse("CREATE TABLE USING (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME USING) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_VALUES() throws ParseException { + ASTNode ast = parse("CREATE TABLE VALUES (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME VALUES) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + + @Test + public void testSQL11ReservedKeyWords_WITH() throws ParseException { + ASTNode ast = parse("CREATE TABLE WITH (col STRING)"); + Assert + .assertEquals( + "AST doesn't match", + "(TOK_CREATETABLE (TOK_TABNAME WITH) TOK_LIKETABLE (TOK_TABCOLLIST (TOK_TABCOL col TOK_STRING)))", + ast.toStringTree()); + } + +} diff --git a/ql/src/test/queries/clientnegative/serde_regex.q b/ql/src/test/queries/clientnegative/serde_regex.q index 13b3f16..c9cfc7d 100644 --- a/ql/src/test/queries/clientnegative/serde_regex.q +++ b/ql/src/test/queries/clientnegative/serde_regex.q @@ -3,7 +3,7 @@ USE default; CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time TIMESTAMP, request STRING, status INT, diff --git a/ql/src/test/queries/clientnegative/serde_regex2.q b/ql/src/test/queries/clientnegative/serde_regex2.q index d523d03..395cfec 100644 --- a/ql/src/test/queries/clientnegative/serde_regex2.q +++ b/ql/src/test/queries/clientnegative/serde_regex2.q @@ -3,7 +3,7 @@ USE default; CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, diff --git a/ql/src/test/queries/clientnegative/serde_regex3.q b/ql/src/test/queries/clientnegative/serde_regex3.q index 5a0295c..4e91f06 100644 --- a/ql/src/test/queries/clientnegative/serde_regex3.q +++ b/ql/src/test/queries/clientnegative/serde_regex3.q @@ -3,7 +3,7 @@ USE default; CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, diff --git a/ql/src/test/queries/clientpositive/ambiguitycheck.q b/ql/src/test/queries/clientpositive/ambiguitycheck.q new file mode 100644 index 0000000..3b029e1 --- /dev/null +++ b/ql/src/test/queries/clientpositive/ambiguitycheck.q @@ -0,0 +1,44 @@ +set hive.cbo.enable=false; 
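+ +-- Note: the queries below probe the ambiguity between identifiers and type names. A call written +-- with a type name, such as int(1.2), could in principle be read as a UDF named 'int' or as a cast; +-- Hive resolves it as a cast, so int(1.2) is equivalent to cast(1.2 as int) and smallint(0.9) below +-- is expected to yield 0. cbo is disabled above, presumably so the plans and results below come +-- from the legacy (non-Calcite) planning path.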
+ +-- check cluster/distribute/partitionBy +SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) ; + +SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),value) ; + +SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,(value)) ; + +SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(value)) ; + +SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(((value)))); + +-- HIVE-6950 +SELECT tab1.key, + tab1.value, + SUM(1) +FROM src as tab1 +GROUP BY tab1.key, + tab1.value +GROUPING SETS ((tab1.key, tab1.value)); + +SELECT key, + src.value, + SUM(1) +FROM src +GROUP BY key, + src.value +GROUPING SETS ((key, src.value)); + +explain extended select int(1.2) from src limit 1; +select int(1.2) from src limit 1; +select bigint(1.34) from src limit 1; +select binary('1') from src limit 1; +select boolean(1) from src limit 1; +select date('1') from src limit 2; +select double(1) from src limit 1; +select float(1) from src limit 1; +select smallint(0.9) from src limit 1; +select timestamp('1') from src limit 2; + +explain extended desc default.src key; + +desc default.src key; diff --git a/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q b/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q index 49c1f54..ea6e9d5 100644 --- a/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q +++ b/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q @@ -1,11 +1,11 @@ set hive.fetch.task.conversion=more; -create table array_table (array array<string>, index int ); +create table array_table (`array` array<string>, index int ); insert into table array_table select array('first', 'second', 'third'), key%3 from src tablesample (4 rows); explain -select index, array[index] from array_table; -select index, array[index] from array_table; +select index, `array`[index] from array_table; +select index, `array`[index] from array_table; create table map_table (data map<string,string>, key int ); insert into table map_table select map('1','one','2','two','3','three'), cast((key%3+1) as int) from src tablesample (4 rows); diff --git a/ql/src/test/queries/clientpositive/decimal_10_0.q b/ql/src/test/queries/clientpositive/decimal_10_0.q index 02b547c..c3e031d 100644 --- a/ql/src/test/queries/clientpositive/decimal_10_0.q +++ b/ql/src/test/queries/clientpositive/decimal_10_0.q @@ -1,9 +1,9 @@ -DROP TABLE IF EXISTS DECIMAL; +DROP TABLE IF EXISTS `DECIMAL`; -CREATE TABLE DECIMAL (dec decimal); +CREATE TABLE `DECIMAL` (dec decimal); -LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL; +LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE `DECIMAL`; -SELECT dec FROM DECIMAL; +SELECT dec FROM `DECIMAL`; -DROP TABLE DECIMAL; \ No newline at end of file +DROP TABLE `DECIMAL`; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q index f12b2c5..376e893 100644 --- a/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q +++ b/ql/src/test/queries/clientpositive/dynamic_partition_pruning.q @@ -8,49 +8,49 @@ set hive.optimize.index.filter=true; select distinct ds from srcpart; select distinct hr from srcpart; -EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds; -create table srcpart_date as select ds as ds, ds as date from srcpart group by ds; +EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds; +create table srcpart_date as select ds as ds,
ds as `date` from srcpart group by ds; create table srcpart_hour as select hr as hr, hr as hour from srcpart group by hr; -create table srcpart_date_hour as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr; +create table srcpart_date_hour as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr; create table srcpart_double_hour as select (hr*2) as hr, hr as hour from srcpart group by hr; -- single column, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; set hive.tez.dynamic.partition.pruning=false; -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; set hive.tez.dynamic.partition.pruning=true; select count(*) from srcpart where ds = '2008-04-08'; -- multiple sources, single key EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; set hive.tez.dynamic.partition.pruning=false; EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; set hive.tez.dynamic.partition.pruning=true; select count(*) from srcpart where hr = 11 and ds = '2008-04-08'; -- multiple columns single source -EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; -select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = 
'2008-04-08' and srcpart_date_hour.hour = 11; +select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; set hive.tez.dynamic.partition.pruning=false; -EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; -select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; +select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; set hive.tez.dynamic.partition.pruning=true; select count(*) from srcpart where ds = '2008-04-08' and hr = 11; -- empty set -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; set hive.tez.dynamic.partition.pruning=false; -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; set hive.tez.dynamic.partition.pruning=true; select count(*) from srcpart where ds = 'I DONT EXIST'; @@ -73,34 +73,34 @@ select count(*) from srcpart where cast(hr as string) = 11; -- parent is reduce tasks -EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'; -select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'; +select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'; select count(*) from srcpart where ds = '2008-04-08'; -- non-equi join -EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr); -select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and 
srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr); +EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr); +select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr); -- old style join syntax -EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr; -select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr; +EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr; +select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr; -- left join -EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; -EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -- full outer -EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -- with static pruning EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; +where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; +where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; -- union + subquery EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select 
min(srcpart.ds) from srcpart); @@ -115,26 +115,26 @@ set hive.auto.convert.join.noconditionaltask = true; set hive.auto.convert.join.noconditionaltask.size = 10000000; -- single column, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; -select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; select count(*) from srcpart where ds = '2008-04-08'; -- multiple sources, single key EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11; +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11; select count(*) from srcpart where hr = 11 and ds = '2008-04-08'; -- multiple columns single source -EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; -select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11; +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; +select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11; select count(*) from srcpart where ds = '2008-04-08' and hr = 11; -- empty set -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; -- Disabled until TEZ-1486 is fixed --- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'; +-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'; -- expressions EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11; @@ -144,27 +144,27 @@ select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart select count(*) from srcpart where hr = 11; -- parent is reduce tasks -EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'; -select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = 
'2008-04-08'; +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'; +select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'; select count(*) from srcpart where ds = '2008-04-08'; -- left join -EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; -EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; +EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -- full outer -EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'; +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'; -- with static pruning EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11; EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; +where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; -- Disabled until TEZ-1486 is fixed -- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) --- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; +-- where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; -- union + subquery EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart); @@ -180,8 +180,8 @@ set hive.vectorized.execution.enabled=false; set hive.exec.max.dynamic.partitions=1000; insert into table srcpart_orc partition (ds, hr) select key, value, ds, hr from srcpart; -EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09'); -select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09'); +EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and 
(srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09'); +select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09'); select count(*) from srcpart where (ds = '2008-04-08' or ds = '2008-04-09') and hr = 11; drop table srcpart_orc; diff --git a/ql/src/test/queries/clientpositive/keyword_1.q b/ql/src/test/queries/clientpositive/keyword_1.q index 2e996af..d274515 100644 --- a/ql/src/test/queries/clientpositive/keyword_1.q +++ b/ql/src/test/queries/clientpositive/keyword_1.q @@ -1,9 +1,9 @@ -- SORT_BEFORE_DIFF -create table test_user (user string, `group` string); +create table test_user (`user` string, `group` string); grant select on table test_user to user hive_test; -explain select user from test_user; +explain select `user` from test_user; show grant user hive_test on table test_user; diff --git a/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q b/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q index e33b4bf..6f00006 100644 --- a/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q +++ b/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q @@ -1,12 +1,12 @@ -CREATE TABLE table(string string) STORED AS TEXTFILE; +CREATE TABLE `table`(`string` string) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE table; +LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE `table`; -SELECT table, count(1) +SELECT `table`, count(1) FROM ( - FROM table - SELECT TRANSFORM (table.string) - USING 'java -cp ../util/target/classes/ org.apache.hadoop.hive.scripts.extracturl' AS (table, count) + FROM `table` + SELECT TRANSFORM (`table`.`string`) + USING 'java -cp ../util/target/classes/ org.apache.hadoop.hive.scripts.extracturl' AS (`table`, count) ) subq -GROUP BY table; +GROUP BY `table`; diff --git a/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q b/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q index 144cfee..5393664 100644 --- a/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q +++ b/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q @@ -1,26 +1,26 @@ -DROP TABLE insert; +DROP TABLE `insert`; -CREATE TABLE insert (key INT, as STRING); +CREATE TABLE `insert` (key INT, `as` STRING); -EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; -INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; +EXPLAIN INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100; +INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100; SELECT SUM(HASH(hash)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM insert + SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM `insert` ) t; -EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; -INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; +EXPLAIN INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100; +INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100; SELECT SUM(HASH(sum)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM insert + SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM `insert` ) t; -SELECT COUNT(*) FROM insert; +SELECT COUNT(*) FROM `insert`; -EXPLAIN INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10; -INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10; +EXPLAIN INSERT OVERWRITE TABLE `insert` 
SELECT * FROM src LIMIT 10; +INSERT OVERWRITE TABLE `insert` SELECT * FROM src LIMIT 10; SELECT SUM(HASH(add)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM insert + SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM `insert` ) t; -DROP TABLE insert; +DROP TABLE `insert`; diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q index 3cf488f..f769abb 100644 --- a/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q +++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat17.q @@ -4,7 +4,7 @@ DROP TABLE PW17; ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-custom-serde/${system:hive.version}/hive-it-custom-serde-${system:hive.version}.jar; -CREATE TABLE PW17(USER STRING, COMPLEXDT ARRAY<INT>) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1'; +CREATE TABLE PW17(`USER` STRING, COMPLEXDT ARRAY<INT>) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1'; LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17 PARTITION (YEAR='1'); ALTER TABLE PW17 PARTITION(YEAR='1') SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe2'; ALTER TABLE PW17 SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1'; @@ -13,13 +13,13 @@ SELECT * FROM PW17; -- Test for non-parititioned table. DROP TABLE PW17_2; -CREATE TABLE PW17_2(USER STRING, COMPLEXDT ARRAY<INT>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1'; +CREATE TABLE PW17_2(`USER` STRING, COMPLEXDT ARRAY<INT>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1'; LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17_2; -- Without the fix HIVE-5199, will throw cast exception via MapOperator SELECT COUNT(*) FROM PW17_2; DROP TABLE PW17_3; -CREATE TABLE PW17_3(USER STRING, COMPLEXDT ARRAY<ARRAY<INT> >) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3'; +CREATE TABLE PW17_3(`USER` STRING, COMPLEXDT ARRAY<ARRAY<INT> >) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3'; LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17_3 PARTITION (YEAR='1'); ALTER TABLE PW17_3 PARTITION(YEAR='1') SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe2'; ALTER TABLE PW17_3 SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3'; @@ -27,7 +27,7 @@ ALTER TABLE PW17_3 SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3'; SELECT * FROM PW17; DROP TABLE PW17_4; -CREATE TABLE PW17_4(USER STRING, COMPLEXDT ARRAY<ARRAY<INT> >) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3'; +CREATE TABLE PW17_4(`USER` STRING, COMPLEXDT ARRAY<ARRAY<INT> >) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3'; LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW17_4; -- Without the fix HIVE-5285, will throw cast exception via MapOperator SELECT COUNT(*) FROM PW17_4; diff --git a/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q b/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q index 40ed258..e9aef8a 100644 --- a/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q +++ b/ql/src/test/queries/clientpositive/partition_wise_fileformat18.q @@ -5,7 +5,7 @@ DROP TABLE PW18; ADD JAR ${system:maven.local.repository}/org/apache/hive/hive-it-custom-serde/${system:hive.version}/hive-it-custom-serde-${system:hive.version}.jar; -CREATE TABLE PW18(USER STRING, COMPLEXDT UNIONTYPE<INT, DOUBLE>) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5'; +CREATE TABLE PW18(`USER` STRING, COMPLEXDT UNIONTYPE<INT, DOUBLE>) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5'; LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW18 PARTITION (YEAR='1'); ALTER TABLE PW18 PARTITION(YEAR='1') SET SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe4'; -- Without the fix HIVE-5202, will throw unsupported data type exception. @@ -13,7 +13,7 @@ SELECT * FROM PW18; -- Test for non-parititioned table. DROP TABLE PW18_2; -CREATE TABLE PW18_2(USER STRING, COMPLEXDT UNIONTYPE<INT, DOUBLE>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5'; +CREATE TABLE PW18_2(`USER` STRING, COMPLEXDT UNIONTYPE<INT, DOUBLE>) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5'; LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE PW18_2; -- Without the fix HIVE-5202, will throw unsupported data type exception SELECT COUNT(*) FROM PW18_2; diff --git a/ql/src/test/queries/clientpositive/ppd_field_garbage.q b/ql/src/test/queries/clientpositive/ppd_field_garbage.q index 23e0778..67bea5c 100644 --- a/ql/src/test/queries/clientpositive/ppd_field_garbage.q +++ b/ql/src/test/queries/clientpositive/ppd_field_garbage.q @@ -1,3 +1,5 @@ +set hive.support.sql11.reserved.keywords=false; +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 -- ppd leaves invalid expr in field expr CREATE TABLE test_issue (fileid int, infos ARRAY<STRUCT<user:INT>>, test_c STRUCT<user_c:STRUCT<age:INT>>); CREATE VIEW v_test_issue AS SELECT fileid, i.user, test_c.user_c.age FROM test_issue LATERAL VIEW explode(infos) info AS i; diff --git a/ql/src/test/queries/clientpositive/serde_regex.q b/ql/src/test/queries/clientpositive/serde_regex.q index accdb54..91cc147 100644 --- a/ql/src/test/queries/clientpositive/serde_regex.q +++ b/ql/src/test/queries/clientpositive/serde_regex.q @@ -2,7 +2,7 @@ EXPLAIN CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, @@ -18,7 +18,7 @@ STORED AS TEXTFILE; CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, diff --git a/ql/src/test/queries/clientpositive/tez_union_group_by.q b/ql/src/test/queries/clientpositive/tez_union_group_by.q index 56e8583..4a58474 100644 --- a/ql/src/test/queries/clientpositive/tez_union_group_by.q +++ b/ql/src/test/queries/clientpositive/tez_union_group_by.q @@ -4,7 +4,7 @@ u bigint, t string, st string ) -PARTITIONED BY (date string) +PARTITIONED BY (`date` string) STORED AS ORC TBLPROPERTIES ("orc.compress"="ZLIB"); @@ -12,7 +12,7 @@ CREATE TABLE y ( u bigint ) -PARTITIONED BY (date string) +PARTITIONED BY (`date` string) STORED AS ORC TBLPROPERTIES ("orc.compress"="ZLIB"); @@ -20,7 +20,7 @@ CREATE TABLE z ( u bigint ) -PARTITIONED BY (date string) +PARTITIONED BY (`date` string) STORED AS ORC TBLPROPERTIES ("orc.compress"="ZLIB"); @@ -37,14 +37,14 @@ EXPLAIN SELECT o.u, n.u FROM ( -SELECT m.u, Min(date) as ft +SELECT m.u, Min(`date`) as ft FROM ( -SELECT u, date FROM x WHERE date < '2014-09-02' +SELECT u, `date` FROM x WHERE `date` < '2014-09-02' UNION ALL -SELECT u, date FROM y WHERE date < '2014-09-02' +SELECT u, `date` FROM y WHERE `date` < '2014-09-02' UNION ALL -SELECT u, date FROM z WHERE date < '2014-09-02' +SELECT u, `date` FROM z WHERE `date` < '2014-09-02' ) m GROUP BY m.u ) n @@ -54,7 +54,7 @@ SELECT x.u FROM x JOIN v ON (x.t = v.t AND x.st <=> v.st) -WHERE x.date >= '2014-03-04' AND x.date <
'2014-09-03'
+WHERE x.`date` >= '2014-03-04' AND x.`date` < '2014-09-03'
 GROUP BY x.u
 ) o
 ON n.u = o.u
@@ -63,14 +63,14 @@ WHERE n.u <> 0 AND n.ft <= '2014-09-02';
 SELECT o.u, n.u
 FROM
 (
-SELECT m.u, Min(date) as ft
+SELECT m.u, Min(`date`) as ft
 FROM
 (
-SELECT u, date FROM x WHERE date < '2014-09-02'
+SELECT u, `date` FROM x WHERE `date` < '2014-09-02'
 UNION ALL
-SELECT u, date FROM y WHERE date < '2014-09-02'
+SELECT u, `date` FROM y WHERE `date` < '2014-09-02'
 UNION ALL
-SELECT u, date FROM z WHERE date < '2014-09-02'
+SELECT u, `date` FROM z WHERE `date` < '2014-09-02'
 ) m
 GROUP BY m.u
 ) n
@@ -80,7 +80,7 @@ SELECT x.u
 FROM x
 JOIN v
 ON (x.t = v.t AND x.st <=> v.st)
-WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03'
+WHERE x.`date` >= '2014-03-04' AND x.`date` < '2014-09-03'
 GROUP BY x.u
 ) o
 ON n.u = o.u
diff --git a/ql/src/test/queries/clientpositive/union_remove_1.q b/ql/src/test/queries/clientpositive/union_remove_1.q
index 0db1743..b5d829c 100644
--- a/ql/src/test/queries/clientpositive/union_remove_1.q
+++ b/ql/src/test/queries/clientpositive/union_remove_1.q
@@ -18,7 +18,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -26,20 +26,20 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_10.q b/ql/src/test/queries/clientpositive/union_remove_10.q
index 1e9c201..c79d160 100644
--- a/ql/src/test/queries/clientpositive/union_remove_10.q
+++ b/ql/src/test/queries/clientpositive/union_remove_10.q
@@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -31,28 +31,28 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
 select * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 ) a
 )b;

 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
 select * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 ) a
 )b;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_11.q b/ql/src/test/queries/clientpositive/union_remove_11.q
index 7052c69..4ebcc02 100644
--- a/ql/src/test/queries/clientpositive/union_remove_11.q
+++ b/ql/src/test/queries/clientpositive/union_remove_11.q
@@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -31,28 +31,28 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
 select * FROM (
- SELECT key, 2 values from inputTbl1
+ SELECT key, 2 `values` from inputTbl1
 UNION ALL
- SELECT key, 3 as values from inputTbl1
+ SELECT key, 3 as `values` from inputTbl1
 ) a
 )b;

 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
 select * FROM (
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 UNION ALL
- SELECT key, 3 as values from inputTbl1
+ SELECT key, 3 as `values` from inputTbl1
 ) a
 )b;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_12.q b/ql/src/test/queries/clientpositive/union_remove_12.q
index 67a1829..6dbf3a6 100644
--- a/ql/src/test/queries/clientpositive/union_remove_12.q
+++ b/ql/src/test/queries/clientpositive/union_remove_12.q
@@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true;
 -- on

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -31,22 +31,22 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;

 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_13.q b/ql/src/test/queries/clientpositive/union_remove_13.q
index 29c164a..6b01bc8 100644
--- a/ql/src/test/queries/clientpositive/union_remove_13.q
+++ b/ql/src/test/queries/clientpositive/union_remove_13.q
@@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true;
 -- on

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -31,22 +31,22 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, count(1) as values from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1 group by key
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;

 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, count(1) as values from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1 group by key
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_14.q b/ql/src/test/queries/clientpositive/union_remove_14.q
index ca2f5e5..b4535c6 100644
--- a/ql/src/test/queries/clientpositive/union_remove_14.q
+++ b/ql/src/test/queries/clientpositive/union_remove_14.q
@@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true;
 -- on

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -32,22 +32,22 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;

 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
 union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
 FROM inputTbl1 a join inputTbl1 b on a.key=b.key
 )c;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_15.q b/ql/src/test/queries/clientpositive/union_remove_15.q
index 72ced75..e3def4c 100644
--- a/ql/src/test/queries/clientpositive/union_remove_15.q
+++ b/ql/src/test/queries/clientpositive/union_remove_15.q
@@ -24,7 +24,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -32,17 +32,17 @@ explain
 insert overwrite table outputTbl1 partition (ds)
 SELECT * FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1 partition (ds)
 SELECT * FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
 ) a;

 desc formatted outputTbl1;
@@ -50,5 +50,5 @@ desc formatted outputTbl1;
 show partitions outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 where ds = '1' order by key, values;
-select * from outputTbl1 where ds = '2' order by key, values;
+select * from outputTbl1 where ds = '1' order by key, `values`;
+select * from outputTbl1 where ds = '2' order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_16.q b/ql/src/test/queries/clientpositive/union_remove_16.q
index 72e6cb1..cfd901f 100644
--- a/ql/src/test/queries/clientpositive/union_remove_16.q
+++ b/ql/src/test/queries/clientpositive/union_remove_16.q
@@ -24,7 +24,7 @@ set hive.exec.dynamic.partition=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile ;
+create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile ;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -32,22 +32,22 @@ explain
 insert overwrite table outputTbl1 partition (ds)
 SELECT * FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1 partition (ds)
 SELECT * FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
 ) a;

 desc formatted outputTbl1;

 show partitions outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 where ds = '1' order by key, values;
-select * from outputTbl1 where ds = '2' order by key, values;
+select * from outputTbl1 where ds = '1' order by key, `values`;
+select * from outputTbl1 where ds = '2' order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_17.q b/ql/src/test/queries/clientpositive/union_remove_17.q
index fa68755..817ad50 100644
--- a/ql/src/test/queries/clientpositive/union_remove_17.q
+++ b/ql/src/test/queries/clientpositive/union_remove_17.q
@@ -21,7 +21,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -29,22 +29,22 @@ explain
 insert overwrite table outputTbl1 partition (ds)
 SELECT * FROM (
- SELECT key, 1 as values, '1' as ds from inputTbl1
+ SELECT key, 1 as `values`, '1' as ds from inputTbl1
 UNION ALL
- SELECT key, 2 as values, '2' as ds from inputTbl1
+ SELECT key, 2 as `values`, '2' as ds from inputTbl1
 ) a;

 insert overwrite table outputTbl1 partition (ds)
 SELECT * FROM (
- SELECT key, 1 as values, '1' as ds from inputTbl1
+ SELECT key, 1 as `values`, '1' as ds from inputTbl1
 UNION ALL
- SELECT key, 2 as values, '2' as ds from inputTbl1
+ SELECT key, 2 as `values`, '2' as ds from inputTbl1
 ) a;

 desc formatted outputTbl1;

 show partitions outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 where ds = '1' order by key, values;
-select * from outputTbl1 where ds = '2' order by key, values;
+select * from outputTbl1 where ds = '1' order by key, `values`;
+select * from outputTbl1 where ds = '2' order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_18.q b/ql/src/test/queries/clientpositive/union_remove_18.q
index 6d2d331..e2ba33e 100644
--- a/ql/src/test/queries/clientpositive/union_remove_18.q
+++ b/ql/src/test/queries/clientpositive/union_remove_18.q
@@ -23,7 +23,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, ds string) stored as textfile;
-create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile;
+create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -31,17 +31,17 @@ explain
 insert overwrite table outputTbl1 partition (ds)
 SELECT * FROM (
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
 UNION ALL
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
 ) a;

 insert overwrite table outputTbl1 partition (ds)
 SELECT * FROM (
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
 UNION ALL
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
 ) a;

 desc formatted outputTbl1;
@@ -49,6 +49,6 @@ desc formatted outputTbl1;
 show partitions outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 where ds = '11' order by key, values;
-select * from outputTbl1 where ds = '18' order by key, values;
-select * from outputTbl1 where ds is not null order by key, values, ds;
+select * from outputTbl1 where ds = '11' order by key, `values`;
+select * from outputTbl1 where ds = '18' order by key, `values`;
+select * from outputTbl1 where ds is not null order by key, `values`, ds;
diff --git a/ql/src/test/queries/clientpositive/union_remove_19.q b/ql/src/test/queries/clientpositive/union_remove_19.q
index 17b8a0f..d3c17e1 100644
--- a/ql/src/test/queries/clientpositive/union_remove_19.q
+++ b/ql/src/test/queries/clientpositive/union_remove_19.q
@@ -20,25 +20,25 @@ set mapred.input.dir.recursive=true;
 -- SORT_QUERY_RESULTS

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;

 explain
 insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 desc formatted outputTbl1;
@@ -48,19 +48,19 @@ select * from outputTbl1;
 -- filter should be fine
 explain
 insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a where a.key = 7;

 insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a where a.key = 7;

 select * from outputTbl1;
@@ -68,26 +68,26 @@ select * from outputTbl1;
 -- filters and sub-queries should be fine
 explain
 insert overwrite table outputTbl1
-select key, values from
+select key, `values` from
 (
-SELECT a.key + a.key as key, a.values
+SELECT a.key + a.key as key, a.`values`
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a
 ) b where b.key >= 7;

 insert overwrite table outputTbl1
-select key, values from
+select key, `values` from
 (
-SELECT a.key + a.key as key, a.values
+SELECT a.key + a.key as key, a.`values`
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a
 ) b where b.key >= 7;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_2.q b/ql/src/test/queries/clientpositive/union_remove_2.q
index 0142325..0982a62 100644
--- a/ql/src/test/queries/clientpositive/union_remove_2.q
+++ b/ql/src/test/queries/clientpositive/union_remove_2.q
@@ -19,7 +19,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -27,25 +27,25 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, 1 as values from inputTbl1
+ SELECT key, 1 as `values` from inputTbl1
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 ) a;

 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, 1 as values from inputTbl1
+ SELECT key, 1 as `values` from inputTbl1
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 ) a;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_20.q b/ql/src/test/queries/clientpositive/union_remove_20.q
index 1c59ef2..a782feb 100644
--- a/ql/src/test/queries/clientpositive/union_remove_20.q
+++ b/ql/src/test/queries/clientpositive/union_remove_20.q
@@ -19,28 +19,28 @@ set mapred.input.dir.recursive=true;
 -- columns being selected) is pushed above the union.

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(values bigint, key string) stored as textfile;
+create table outputTbl1(`values` bigint, key string) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;

 explain
 insert overwrite table outputTbl1
-SELECT a.values, a.key
+SELECT a.`values`, a.key
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1
-SELECT a.values, a.key
+SELECT a.`values`, a.key
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_21.q b/ql/src/test/queries/clientpositive/union_remove_21.q
index cbaa08b..09c537c 100644
--- a/ql/src/test/queries/clientpositive/union_remove_21.q
+++ b/ql/src/test/queries/clientpositive/union_remove_21.q
@@ -27,17 +27,17 @@ explain
 insert overwrite table outputTbl1
 SELECT a.key FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1
 SELECT a.key FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 desc formatted outputTbl1;
diff --git a/ql/src/test/queries/clientpositive/union_remove_22.q b/ql/src/test/queries/clientpositive/union_remove_22.q
index 982912b..96f4fe6 100644
--- a/ql/src/test/queries/clientpositive/union_remove_22.q
+++ b/ql/src/test/queries/clientpositive/union_remove_22.q
@@ -18,25 +18,25 @@ set mapred.input.dir.recursive=true;
 -- both the sub-qeuries of the union) is pushed above the union.

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint, values2 bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint, values2 bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;

 explain
 insert overwrite table outputTbl1
-SELECT a.key, a.values, a.values
+SELECT a.key, a.`values`, a.`values`
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1
-SELECT a.key, a.values, a.values
+SELECT a.key, a.`values`, a.`values`
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 desc formatted outputTbl1;
@@ -45,20 +45,20 @@ select * from outputTbl1;
 explain
 insert overwrite table outputTbl1
-SELECT a.key, concat(a.values, a.values), concat(a.values, a.values)
+SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`)
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1
-SELECT a.key, concat(a.values, a.values), concat(a.values, a.values)
+SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`)
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_23.q b/ql/src/test/queries/clientpositive/union_remove_23.q
index 63e4418..d3c4b73 100644
--- a/ql/src/test/queries/clientpositive/union_remove_23.q
+++ b/ql/src/test/queries/clientpositive/union_remove_23.q
@@ -19,7 +19,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -27,22 +27,22 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from
+ SELECT key, count(1) as `values` from
 (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) subq2;

 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from
+ SELECT key, count(1) as `values` from
 (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) subq2;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_24.q b/ql/src/test/queries/clientpositive/union_remove_24.q
index 88c378d..10cec54 100644
--- a/ql/src/test/queries/clientpositive/union_remove_24.q
+++ b/ql/src/test/queries/clientpositive/union_remove_24.q
@@ -17,7 +17,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key double, values bigint) stored as textfile;
+create table outputTbl1(key double, `values` bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -25,20 +25,20 @@ EXPLAIN
 INSERT OVERWRITE TABLE outputTbl1
 SELECT * FROM (
- SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key
+ SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key
 UNION ALL
- SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key
+ SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key
 ) a;

 INSERT OVERWRITE TABLE outputTbl1
 SELECT * FROM (
- SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key
+ SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key
 UNION ALL
- SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key
+ SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key
 ) a;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_25.q b/ql/src/test/queries/clientpositive/union_remove_25.q
index 27d9ebe..be50c0f 100644
--- a/ql/src/test/queries/clientpositive/union_remove_25.q
+++ b/ql/src/test/queries/clientpositive/union_remove_25.q
@@ -19,9 +19,9 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile;
-create table outputTbl2(key string, values bigint) partitioned by (ds string) stored as textfile;
-create table outputTbl3(key string, values bigint) partitioned by (ds string,hr string) stored as textfile;
+create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile;
+create table outputTbl2(key string, `values` bigint) partitioned by (ds string) stored as textfile;
+create table outputTbl3(key string, `values` bigint) partitioned by (ds string,hr string) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -29,23 +29,23 @@ explain
 insert overwrite table outputTbl1 partition(ds='2004')
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1 partition(ds='2004')
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 desc formatted outputTbl1 partition(ds='2004');

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;

 explain
 insert overwrite table outputTbl2 partition(ds)
diff --git a/ql/src/test/queries/clientpositive/union_remove_3.q b/ql/src/test/queries/clientpositive/union_remove_3.q
index 7e1b113..0e491eb 100644
--- a/ql/src/test/queries/clientpositive/union_remove_3.q
+++ b/ql/src/test/queries/clientpositive/union_remove_3.q
@@ -19,7 +19,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -27,25 +27,25 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, 1 as values from inputTbl1
+ SELECT key, 1 as `values` from inputTbl1
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 UNION ALL
- SELECT key, 3 as values from inputTbl1
+ SELECT key, 3 as `values` from inputTbl1
 ) a;

 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, 1 as values from inputTbl1
+ SELECT key, 1 as `values` from inputTbl1
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 UNION ALL
- SELECT key, 3 as values from inputTbl1
+ SELECT key, 3 as `values` from inputTbl1
 ) a;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_4.q b/ql/src/test/queries/clientpositive/union_remove_4.q
index 44b31d6..b8b72e7 100644
--- a/ql/src/test/queries/clientpositive/union_remove_4.q
+++ b/ql/src/test/queries/clientpositive/union_remove_4.q
@@ -19,7 +19,7 @@ set hive.merge.smallfiles.avgsize=1;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -27,20 +27,20 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_5.q b/ql/src/test/queries/clientpositive/union_remove_5.q
index c5c0b7f..4845c8c 100644
--- a/ql/src/test/queries/clientpositive/union_remove_5.q
+++ b/ql/src/test/queries/clientpositive/union_remove_5.q
@@ -21,7 +21,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -29,24 +29,24 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, 1 as values from inputTbl1
+ SELECT key, 1 as `values` from inputTbl1
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 ) a;

 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, 1 as values from inputTbl1
+ SELECT key, 1 as `values` from inputTbl1
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 ) a;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_6.q b/ql/src/test/queries/clientpositive/union_remove_6.q
index 6990ed2..8208fe2 100644
--- a/ql/src/test/queries/clientpositive/union_remove_6.q
+++ b/ql/src/test/queries/clientpositive/union_remove_6.q
@@ -15,28 +15,28 @@ set mapred.input.dir.recursive=true;
 -- merging is turned off

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as textfile;
-create table outputTbl2(key string, values bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint) stored as textfile;
+create table outputTbl2(key string, `values` bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;

 explain
 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a
 insert overwrite table outputTbl1 select *
 insert overwrite table outputTbl2 select *;

 FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a
 insert overwrite table outputTbl1 select *
 insert overwrite table outputTbl2 select *;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
-select * from outputTbl2 order by key, values;;
+select * from outputTbl1 order by key, `values`;
+select * from outputTbl2 order by key, `values`;;
diff --git a/ql/src/test/queries/clientpositive/union_remove_6_subq.q b/ql/src/test/queries/clientpositive/union_remove_6_subq.q
index 8bcac6f..592d523 100644
--- a/ql/src/test/queries/clientpositive/union_remove_6_subq.q
+++ b/ql/src/test/queries/clientpositive/union_remove_6_subq.q
@@ -14,17 +14,17 @@ set mapred.input.dir.recursive=true;
 -- merging is turned off

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as textfile;
-create table outputTbl2(key string, values bigint) stored as textfile;
+create table outputTbl1(key string, `values` bigint) stored as textfile;
+create table outputTbl2(key string, `values` bigint) stored as textfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;

 explain
 FROM (
 select * from(
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 )subq
 ) a
 insert overwrite table outputTbl1 select *
@@ -32,17 +32,17 @@ insert overwrite table outputTbl2 select *;
 FROM (
 select * from(
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 )subq
 ) a
 insert overwrite table outputTbl1 select *
 insert overwrite table outputTbl2 select *;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
-select * from outputTbl2 order by key, values;
+select * from outputTbl1 order by key, `values`;
+select * from outputTbl2 order by key, `values`;

 -- The following queries guarantee the correctness.
 explain
diff --git a/ql/src/test/queries/clientpositive/union_remove_7.q b/ql/src/test/queries/clientpositive/union_remove_7.q
index c254aba..a915226 100644
--- a/ql/src/test/queries/clientpositive/union_remove_7.q
+++ b/ql/src/test/queries/clientpositive/union_remove_7.q
@@ -20,7 +20,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -28,20 +28,20 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 ) a;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_8.q b/ql/src/test/queries/clientpositive/union_remove_8.q
index 8dfb8e8..462c3fd 100644
--- a/ql/src/test/queries/clientpositive/union_remove_8.q
+++ b/ql/src/test/queries/clientpositive/union_remove_8.q
@@ -21,7 +21,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -29,24 +29,24 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, 1 as values from inputTbl1
+ SELECT key, 1 as `values` from inputTbl1
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 ) a;

 insert overwrite table outputTbl1
 SELECT * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
 UNION ALL
- SELECT key, 1 as values from inputTbl1
+ SELECT key, 1 as `values` from inputTbl1
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 ) a;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/union_remove_9.q b/ql/src/test/queries/clientpositive/union_remove_9.q
index c9a4dc3..a2674c8 100644
--- a/ql/src/test/queries/clientpositive/union_remove_9.q
+++ b/ql/src/test/queries/clientpositive/union_remove_9.q
@@ -21,7 +21,7 @@ set mapred.input.dir.recursive=true;
 -- to run the test only on hadoop 23

 create table inputTbl1(key string, val string) stored as textfile;
-create table outputTbl1(key string, values bigint) stored as rcfile;
+create table outputTbl1(key string, `values` bigint) stored as rcfile;

 load data local inpath '../../data/files/T1.txt' into table inputTbl1;
@@ -29,28 +29,28 @@ explain
 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, count(1) as values from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1 group by key
 union all
 select * FROM (
- SELECT key, 1 as values from inputTbl1
+ SELECT key, 1 as `values` from inputTbl1
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 ) a
 )b;

 insert overwrite table outputTbl1
 SELECT * FROM (
-select key, count(1) as values from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1 group by key
 union all
 select * FROM (
- SELECT key, 1 as values from inputTbl1
+ SELECT key, 1 as `values` from inputTbl1
 UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
 ) a
 )b;

 desc formatted outputTbl1;

 set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat;
-select * from outputTbl1 order by key, values;
+select * from outputTbl1 order by key, `values`;
diff --git a/ql/src/test/queries/clientpositive/vector_decimal_10_0.q b/ql/src/test/queries/clientpositive/vector_decimal_10_0.q
index ae93058..e0d5b98 100644
--- a/ql/src/test/queries/clientpositive/vector_decimal_10_0.q
+++ b/ql/src/test/queries/clientpositive/vector_decimal_10_0.q
@@ -2,18 +2,18 @@ SET hive.vectorized.execution.enabled=true;
 set hive.fetch.task.conversion=minimal;

 DROP TABLE IF EXISTS decimal_txt;
-DROP TABLE IF EXISTS decimal;
+DROP TABLE IF EXISTS `decimal`;

 CREATE TABLE decimal_txt (dec decimal);

 LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE decimal_txt;

-CREATE TABLE DECIMAL STORED AS ORC AS SELECT * FROM decimal_txt;
+CREATE TABLE `DECIMAL` STORED AS ORC AS SELECT * FROM decimal_txt;

 EXPLAIN
-SELECT dec FROM DECIMAL order by dec;
+SELECT dec FROM `DECIMAL` order by dec;

-SELECT dec FROM DECIMAL order by dec;
+SELECT dec FROM `DECIMAL` order by dec;

 DROP TABLE DECIMAL_txt;
-DROP TABLE DECIMAL;
\ No newline at end of file
+DROP TABLE `DECIMAL`;
\ No newline at end of file
diff --git a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
index 1197f7d..71971fe 100644
--- a/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
+++ b/ql/src/test/queries/clientpositive/vectorized_dynamic_partition_pruning.q
@@ -9,49 +9,49 @@ set hive.vectorized.execution.enabled=true;
 select distinct ds from srcpart;
 select distinct hr from srcpart;

-EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds;
-create table srcpart_date stored as orc as select ds as ds, ds as date from srcpart group by ds;
+EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds;
+create table srcpart_date stored as orc as select ds as ds, ds as `date` from srcpart group by ds;
 create table srcpart_hour stored as orc as select hr as hr, hr as hour from srcpart group by hr;
-create table srcpart_date_hour stored as orc as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr;
+create table srcpart_date_hour stored as orc as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr;
 create table srcpart_double_hour stored as orc as select (hr*2) as hr, hr as hour from srcpart group by hr;

 -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = '2008-04-08';

 -- multiple sources, single key
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=false;
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where hr = 11 and ds = '2008-04-08';

 -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = '2008-04-08' and hr = 11;

 -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 set hive.tez.dynamic.partition.pruning=false;
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 set hive.tez.dynamic.partition.pruning=true;
 select count(*) from srcpart where ds = 'I DONT EXIST';
@@ -74,34 +74,34 @@ select count(*) from srcpart where cast(hr as string) = 11;

 -- parent is reduce tasks
-EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08';
-select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
+select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
 select count(*) from srcpart where ds = '2008-04-08';

 -- non-equi join
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
-select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);
+select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr);

 -- old style join syntax
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
-select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;
+select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr;

 -- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';

 -- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';

 -- with static pruning
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;

 -- union + subquery
 EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
@@ -116,26 +116,26 @@ set hive.auto.convert.join.noconditionaltask = true;
 set hive.auto.convert.join.noconditionaltask.size = 10000000;

 -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
 select count(*) from srcpart where ds = '2008-04-08';

 -- multiple sources, single key
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11;
 select count(*) from srcpart where hr = 11 and ds = '2008-04-08';

 -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
-select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11;
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
+select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11;
 select count(*) from srcpart where ds = '2008-04-08' and hr = 11;

 -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
 -- Disabled until TEZ-1486 is fixed
--- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';

 -- expressions
 EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11;
@@ -145,27 +145,27 @@ select count(*) from srcpart join srcpart_double_hour on (srcpart.hr*2 = srcpart
 select count(*) from srcpart where hr = 11;

 -- parent is reduce tasks
-EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08';
-select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
+select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08';
 select count(*) from srcpart where ds = '2008-04-08';

 -- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
-EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';
+EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';

 -- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08';
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08';

 -- with static pruning
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
 select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11;
 EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
 -- Disabled until TEZ-1486 is fixed
 -- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
--- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+-- where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;

 -- union + subquery
 EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart);
@@ -181,8 +181,8 @@ set hive.vectorized.execution.enabled=false;
 set hive.exec.max.dynamic.partitions=1000;
 insert into table srcpart_orc partition (ds, hr) select key, value, ds, hr from srcpart;

-EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09');
-select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09');
+EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
+select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09');
 select count(*) from srcpart where (ds = '2008-04-08' or ds = '2008-04-09') and hr = 11;

 drop table srcpart_orc;
diff --git a/ql/src/test/results/clientnegative/authorization_cannot_create_all_role.q.out b/ql/src/test/results/clientnegative/authorization_cannot_create_all_role.q.out
index 99f20bd..3bc663b 100644
--- a/ql/src/test/results/clientnegative/authorization_cannot_create_all_role.q.out
+++ b/ql/src/test/results/clientnegative/authorization_cannot_create_all_role.q.out
@@ -2,6 +2,4 @@ PREHOOK: query: set role ADMIN
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ADMIN
 POSTHOOK: type: SHOW_ROLES
-PREHOOK: query: create role all
-PREHOOK: type: CREATEROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Role name cannot be one of the reserved roles: [ALL, DEFAULT, NONE]
+FAILED: ParseException line 2:12 Failed to recognize predicate 'all'. Failed rule: 'identifier' in create role
diff --git a/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out b/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out
index 4808433..a8f6c1c 100644
--- a/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out
+++ b/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out
@@ -2,6 +2,4 @@ PREHOOK: query: set role ADMIN
 PREHOOK: type: SHOW_ROLES
 POSTHOOK: query: set role ADMIN
 POSTHOOK: type: SHOW_ROLES
-PREHOOK: query: create role None
-PREHOOK: type: CREATEROLE
-FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Role name cannot be one of the reserved roles: [ALL, DEFAULT, NONE]
+FAILED: ParseException line 2:12 Failed to recognize predicate 'None'. Failed rule: 'identifier' in create role
diff --git a/ql/src/test/results/clientnegative/lateral_view_join.q.out b/ql/src/test/results/clientnegative/lateral_view_join.q.out
index 8ad1386..74a7ea5 100644
--- a/ql/src/test/results/clientnegative/lateral_view_join.q.out
+++ b/ql/src/test/results/clientnegative/lateral_view_join.q.out
@@ -1 +1 @@
-FAILED: ParseException line 1:62 missing EOF at 'myTable' near 'AS'
+FAILED: ParseException line 1:59 Failed to recognize predicate 'AS'.
Failed rule: 'identifier' in table alias diff --git a/ql/src/test/results/clientnegative/select_charliteral.q.out b/ql/src/test/results/clientnegative/select_charliteral.q.out index 09e409b..1d02fa1 100644 --- a/ql/src/test/results/clientnegative/select_charliteral.q.out +++ b/ql/src/test/results/clientnegative/select_charliteral.q.out @@ -1,2 +1 @@ -FAILED: ParseException line 3:11 missing \' at ',' near '_c17' in character string literal -line 4:0 mismatched input '' expecting \' near '_c17' in character string literal +FAILED: ParseException line 3:7 cannot recognize input near '_c17' ',' 'count' in expression specification diff --git a/ql/src/test/results/clientnegative/serde_regex.q.out b/ql/src/test/results/clientnegative/serde_regex.q.out index fc29724..7892bb2 100644 --- a/ql/src/test/results/clientnegative/serde_regex.q.out +++ b/ql/src/test/results/clientnegative/serde_regex.q.out @@ -8,7 +8,7 @@ PREHOOK: query: -- This should fail because Regex SerDe doesn't support STRUCT CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time TIMESTAMP, request STRING, status INT, diff --git a/ql/src/test/results/clientnegative/serde_regex2.q.out b/ql/src/test/results/clientnegative/serde_regex2.q.out index 198f79b..1ceb387 100644 --- a/ql/src/test/results/clientnegative/serde_regex2.q.out +++ b/ql/src/test/results/clientnegative/serde_regex2.q.out @@ -8,7 +8,7 @@ PREHOOK: query: -- Mismatch between the number of matching groups and columns, t CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, @@ -27,7 +27,7 @@ POSTHOOK: query: -- Mismatch between the number of matching groups and columns, CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, diff --git a/ql/src/test/results/clientnegative/serde_regex3.q.out b/ql/src/test/results/clientnegative/serde_regex3.q.out index 1df4cd6..028a24f 100644 --- a/ql/src/test/results/clientnegative/serde_regex3.q.out +++ b/ql/src/test/results/clientnegative/serde_regex3.q.out @@ -8,7 +8,7 @@ PREHOOK: query: -- null input.regex, raise an exception CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, diff --git a/ql/src/test/results/clientpositive/ambiguitycheck.q.out b/ql/src/test/results/clientpositive/ambiguitycheck.q.out new file mode 100644 index 0000000..c618cef --- /dev/null +++ b/ql/src/test/results/clientpositive/ambiguitycheck.q.out @@ -0,0 +1,873 @@ +PREHOOK: query: -- check cluster/distribute/partitionBy +SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: -- check cluster/distribute/partitionBy +SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),value) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),value) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,(value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### 
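-- A minimal HiveQL sketch of the rule the updated outputs above exercise: with
-- hive.support.sql11.reserved.keywords=true (the new default), SQL2011 reserved
-- words such as 'user' and 'date' parse as identifiers only when backtick-quoted,
-- and unquoted uses now fail at parse time ("Failed rule: 'identifier'", as in the
-- rewritten ParseException messages above). The table name web_log_demo is
-- hypothetical.
CREATE TABLE web_log_demo(
  host STRING,
  `user` STRING,                  -- reserved word: must be quoted in DDL...
  `date` STRING);
SELECT `user`, count(*)           -- ...and in every later reference,
FROM web_log_demo
WHERE `date` = '2008-04-08'       -- including predicates, as in the pruning tests
GROUP BY `user`;
DROP TABLE web_log_demo;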
+POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,(value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(((value)))) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(((value)))) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: -- HIVE-6950 +SELECT tab1.key, + tab1.value, + SUM(1) +FROM src as tab1 +GROUP BY tab1.key, + tab1.value +GROUPING SETS ((tab1.key, tab1.value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: -- HIVE-6950 +SELECT tab1.key, + tab1.value, + SUM(1) +FROM src as tab1 +GROUP BY tab1.key, + tab1.value +GROUPING SETS ((tab1.key, tab1.value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 3 +10 val_10 1 +100 val_100 2 +103 val_103 2 +104 val_104 2 +105 val_105 1 +11 val_11 1 +111 val_111 1 +113 val_113 2 +114 val_114 1 +116 val_116 1 +118 val_118 2 +119 val_119 3 +12 val_12 2 +120 val_120 2 +125 val_125 2 +126 val_126 1 +128 val_128 3 +129 val_129 2 +131 val_131 1 +133 val_133 1 +134 val_134 2 +136 val_136 1 +137 val_137 2 +138 val_138 4 +143 val_143 1 +145 val_145 1 +146 val_146 2 +149 val_149 2 +15 val_15 2 +150 val_150 1 +152 val_152 2 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 2 +165 val_165 2 +166 val_166 1 +167 val_167 3 +168 val_168 1 +169 val_169 4 +17 val_17 1 +170 val_170 1 +172 val_172 2 +174 val_174 2 +175 val_175 2 +176 val_176 2 +177 val_177 1 +178 val_178 1 +179 val_179 2 +18 val_18 2 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 3 +189 val_189 1 +19 val_19 1 +190 val_190 1 +191 val_191 2 +192 val_192 1 +193 val_193 3 +194 val_194 1 +195 val_195 2 +196 val_196 1 +197 val_197 2 +199 val_199 3 +2 val_2 1 +20 val_20 1 +200 val_200 2 +201 val_201 1 +202 val_202 1 +203 val_203 2 +205 val_205 2 +207 val_207 2 +208 val_208 3 +209 val_209 2 +213 val_213 2 +214 val_214 1 +216 val_216 2 +217 val_217 2 +218 val_218 1 +219 val_219 2 +221 val_221 2 +222 val_222 1 +223 val_223 2 +224 val_224 2 +226 val_226 1 +228 val_228 1 +229 val_229 2 +230 val_230 5 +233 val_233 2 +235 val_235 1 +237 val_237 2 +238 val_238 2 +239 val_239 2 +24 val_24 2 +241 val_241 1 +242 val_242 2 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 2 +256 val_256 2 +257 val_257 1 +258 val_258 1 +26 val_26 2 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 2 +266 val_266 1 +27 val_27 1 +272 val_272 2 +273 val_273 3 +274 val_274 1 +275 val_275 1 +277 val_277 4 +278 val_278 2 +28 val_28 1 +280 val_280 2 +281 val_281 2 +282 val_282 2 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 2 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 3 +30 val_30 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 2 
+308 val_308 1 +309 val_309 2 +310 val_310 1 +311 val_311 3 +315 val_315 1 +316 val_316 3 +317 val_317 2 +318 val_318 3 +321 val_321 2 +322 val_322 2 +323 val_323 1 +325 val_325 2 +327 val_327 3 +33 val_33 1 +331 val_331 2 +332 val_332 1 +333 val_333 2 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +34 val_34 1 +341 val_341 1 +342 val_342 2 +344 val_344 2 +345 val_345 1 +348 val_348 5 +35 val_35 3 +351 val_351 1 +353 val_353 2 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 val_367 2 +368 val_368 1 +369 val_369 3 +37 val_37 2 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 2 +384 val_384 3 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 2 +396 val_396 3 +397 val_397 2 +399 val_399 2 +4 val_4 1 +400 val_400 1 +401 val_401 5 +402 val_402 1 +403 val_403 3 +404 val_404 2 +406 val_406 4 +407 val_407 1 +409 val_409 3 +41 val_41 1 +411 val_411 1 +413 val_413 2 +414 val_414 2 +417 val_417 3 +418 val_418 1 +419 val_419 1 +42 val_42 2 +421 val_421 1 +424 val_424 2 +427 val_427 1 +429 val_429 2 +43 val_43 1 +430 val_430 3 +431 val_431 3 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 3 +439 val_439 2 +44 val_44 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 3 +455 val_455 1 +457 val_457 1 +458 val_458 2 +459 val_459 2 +460 val_460 1 +462 val_462 2 +463 val_463 2 +466 val_466 3 +467 val_467 1 +468 val_468 4 +469 val_469 5 +47 val_47 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 2 +479 val_479 1 +480 val_480 3 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 4 +490 val_490 1 +491 val_491 1 +492 val_492 2 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 3 +5 val_5 3 +51 val_51 2 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 2 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 2 +69 val_69 1 +70 val_70 3 +72 val_72 2 +74 val_74 1 +76 val_76 2 +77 val_77 1 +78 val_78 1 +8 val_8 1 +80 val_80 1 +82 val_82 1 +83 val_83 2 +84 val_84 2 +85 val_85 1 +86 val_86 1 +87 val_87 1 +9 val_9 1 +90 val_90 3 +92 val_92 1 +95 val_95 2 +96 val_96 1 +97 val_97 2 +98 val_98 2 +PREHOOK: query: SELECT key, + src.value, + SUM(1) +FROM src +GROUP BY key, + src.value +GROUPING SETS ((key, src.value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, + src.value, + SUM(1) +FROM src +GROUP BY key, + src.value +GROUPING SETS ((key, src.value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 3 +10 val_10 1 +100 val_100 2 +103 val_103 2 +104 val_104 2 +105 val_105 1 +11 val_11 1 +111 val_111 1 +113 val_113 2 +114 val_114 1 +116 val_116 1 +118 val_118 2 +119 val_119 3 +12 val_12 2 +120 val_120 2 +125 val_125 2 +126 val_126 1 +128 val_128 3 +129 val_129 2 +131 val_131 1 +133 val_133 1 +134 val_134 2 +136 val_136 1 +137 val_137 2 +138 val_138 4 +143 val_143 1 +145 val_145 1 +146 val_146 2 +149 val_149 2 +15 val_15 2 +150 val_150 1 +152 val_152 2 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 2 +165 val_165 2 +166 val_166 1 +167 val_167 3 +168 val_168 1 +169 val_169 4 +17 val_17 1 +170 val_170 1 +172 val_172 2 +174 val_174 2 +175 val_175 2 +176 val_176 2 +177 val_177 
1 +178 val_178 1 +179 val_179 2 +18 val_18 2 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 3 +189 val_189 1 +19 val_19 1 +190 val_190 1 +191 val_191 2 +192 val_192 1 +193 val_193 3 +194 val_194 1 +195 val_195 2 +196 val_196 1 +197 val_197 2 +199 val_199 3 +2 val_2 1 +20 val_20 1 +200 val_200 2 +201 val_201 1 +202 val_202 1 +203 val_203 2 +205 val_205 2 +207 val_207 2 +208 val_208 3 +209 val_209 2 +213 val_213 2 +214 val_214 1 +216 val_216 2 +217 val_217 2 +218 val_218 1 +219 val_219 2 +221 val_221 2 +222 val_222 1 +223 val_223 2 +224 val_224 2 +226 val_226 1 +228 val_228 1 +229 val_229 2 +230 val_230 5 +233 val_233 2 +235 val_235 1 +237 val_237 2 +238 val_238 2 +239 val_239 2 +24 val_24 2 +241 val_241 1 +242 val_242 2 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 2 +256 val_256 2 +257 val_257 1 +258 val_258 1 +26 val_26 2 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 2 +266 val_266 1 +27 val_27 1 +272 val_272 2 +273 val_273 3 +274 val_274 1 +275 val_275 1 +277 val_277 4 +278 val_278 2 +28 val_28 1 +280 val_280 2 +281 val_281 2 +282 val_282 2 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 2 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 3 +30 val_30 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 2 +308 val_308 1 +309 val_309 2 +310 val_310 1 +311 val_311 3 +315 val_315 1 +316 val_316 3 +317 val_317 2 +318 val_318 3 +321 val_321 2 +322 val_322 2 +323 val_323 1 +325 val_325 2 +327 val_327 3 +33 val_33 1 +331 val_331 2 +332 val_332 1 +333 val_333 2 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +34 val_34 1 +341 val_341 1 +342 val_342 2 +344 val_344 2 +345 val_345 1 +348 val_348 5 +35 val_35 3 +351 val_351 1 +353 val_353 2 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 val_367 2 +368 val_368 1 +369 val_369 3 +37 val_37 2 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 2 +384 val_384 3 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 2 +396 val_396 3 +397 val_397 2 +399 val_399 2 +4 val_4 1 +400 val_400 1 +401 val_401 5 +402 val_402 1 +403 val_403 3 +404 val_404 2 +406 val_406 4 +407 val_407 1 +409 val_409 3 +41 val_41 1 +411 val_411 1 +413 val_413 2 +414 val_414 2 +417 val_417 3 +418 val_418 1 +419 val_419 1 +42 val_42 2 +421 val_421 1 +424 val_424 2 +427 val_427 1 +429 val_429 2 +43 val_43 1 +430 val_430 3 +431 val_431 3 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 3 +439 val_439 2 +44 val_44 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 3 +455 val_455 1 +457 val_457 1 +458 val_458 2 +459 val_459 2 +460 val_460 1 +462 val_462 2 +463 val_463 2 +466 val_466 3 +467 val_467 1 +468 val_468 4 +469 val_469 5 +47 val_47 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 2 +479 val_479 1 +480 val_480 3 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 4 +490 val_490 1 +491 val_491 1 +492 val_492 2 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 3 +5 val_5 3 +51 val_51 2 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 2 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 2 +69 val_69 1 +70 val_70 3 +72 val_72 2 +74 val_74 1 +76 val_76 2 +77 val_77 1 +78 val_78 1 +8 val_8 1 +80 
val_80 1 +82 val_82 1 +83 val_83 2 +84 val_84 2 +85 val_85 1 +86 val_86 1 +87 val_87 1 +9 val_9 1 +90 val_90 3 +92 val_92 1 +95 val_95 2 +96 val_96 1 +97 val_97 2 +98 val_98 2 +PREHOOK: query: explain extended select int(1.2) from src limit 1 +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select int(1.2) from src limit 1 +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + src + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_FUNCTION + int + 1.2 + TOK_LIMIT + 1 + + +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: 1 + Processor Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false + Select Operator + expressions: 1 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 2000 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 1 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + ListSink + +PREHOOK: query: select int(1.2) from src limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select int(1.2) from src limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +1 +PREHOOK: query: select bigint(1.34) from src limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select bigint(1.34) from src limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +1 +PREHOOK: query: select binary('1') from src limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select binary('1') from src limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +1 +PREHOOK: query: select boolean(1) from src limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select boolean(1) from src limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +true +PREHOOK: query: select date('1') from src limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select date('1') from src limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +NULL +NULL +PREHOOK: query: select double(1) from src limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select double(1) from src limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +1.0 +PREHOOK: query: select float(1) from src limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select float(1) from src limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +1.0 +PREHOOK: query: select smallint(0.9) from src limit 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select smallint(0.9) from src limit 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 +PREHOOK: query: select timestamp('1') from src limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was 
here #### +POSTHOOK: query: select timestamp('1') from src limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +NULL +NULL +PREHOOK: query: explain extended desc default.src key +PREHOOK: type: DESCTABLE +POSTHOOK: query: explain extended desc default.src key +POSTHOOK: type: DESCTABLE +ABSTRACT SYNTAX TREE: + +TOK_DESCTABLE + TOK_TABTYPE + . + default + src + key + + +STAGE DEPENDENCIES: + Stage-0 is a root stage + Stage-1 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-0 + Describe Table Operator: + Describe Table +#### A masked pattern was here #### + table: default.src + + Stage: Stage-1 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: desc default.src key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src +POSTHOOK: query: desc default.src key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src +key string from deserializer diff --git a/ql/src/test/results/clientpositive/array_map_access_nonconstant.q.out b/ql/src/test/results/clientpositive/array_map_access_nonconstant.q.out index d8b88c6..7442c05 100644 --- a/ql/src/test/results/clientpositive/array_map_access_nonconstant.q.out +++ b/ql/src/test/results/clientpositive/array_map_access_nonconstant.q.out @@ -1,8 +1,8 @@ -PREHOOK: query: create table array_table (array array, index int ) +PREHOOK: query: create table array_table (`array` array, index int ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@array_table -POSTHOOK: query: create table array_table (array array, index int ) +POSTHOOK: query: create table array_table (`array` array, index int ) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@array_table @@ -17,10 +17,10 @@ POSTHOOK: Output: default@array_table POSTHOOK: Lineage: array_table.array EXPRESSION [] POSTHOOK: Lineage: array_table.index EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] PREHOOK: query: explain -select index, array[index] from array_table +select index, `array`[index] from array_table PREHOOK: type: QUERY POSTHOOK: query: explain -select index, array[index] from array_table +select index, `array`[index] from array_table POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -39,11 +39,11 @@ STAGE PLANS: Statistics: Num rows: 4 Data size: 80 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: select index, array[index] from array_table +PREHOOK: query: select index, `array`[index] from array_table PREHOOK: type: QUERY PREHOOK: Input: default@array_table #### A masked pattern was here #### -POSTHOOK: query: select index, array[index] from array_table +POSTHOOK: query: select index, `array`[index] from array_table POSTHOOK: type: QUERY POSTHOOK: Input: default@array_table #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/decimal_10_0.q.out b/ql/src/test/results/clientpositive/decimal_10_0.q.out index ae3426c..ee2d0fe 100644 --- a/ql/src/test/results/clientpositive/decimal_10_0.q.out +++ b/ql/src/test/results/clientpositive/decimal_10_0.q.out @@ -1,38 +1,38 @@ -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL +PREHOOK: query: DROP TABLE IF EXISTS `DECIMAL` PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL +POSTHOOK: query: DROP TABLE IF EXISTS `DECIMAL` POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE DECIMAL (dec decimal) +PREHOOK: query: CREATE TABLE `DECIMAL` (dec decimal) PREHOOK: type: CREATETABLE PREHOOK: Output: 
database:default PREHOOK: Output: default@DECIMAL -POSTHOOK: query: CREATE TABLE DECIMAL (dec decimal) +POSTHOOK: query: CREATE TABLE `DECIMAL` (dec decimal) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DECIMAL -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE `DECIMAL` PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@decimal -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE `DECIMAL` POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@decimal -PREHOOK: query: SELECT dec FROM DECIMAL +PREHOOK: query: SELECT dec FROM `DECIMAL` PREHOOK: type: QUERY PREHOOK: Input: default@decimal #### A masked pattern was here #### -POSTHOOK: query: SELECT dec FROM DECIMAL +POSTHOOK: query: SELECT dec FROM `DECIMAL` POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal #### A masked pattern was here #### 1000000000 NULL -PREHOOK: query: DROP TABLE DECIMAL +PREHOOK: query: DROP TABLE `DECIMAL` PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal PREHOOK: Output: default@decimal -POSTHOOK: query: DROP TABLE DECIMAL +POSTHOOK: query: DROP TABLE `DECIMAL` POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@decimal POSTHOOK: Output: default@decimal diff --git a/ql/src/test/results/clientpositive/keyword_1.q.out b/ql/src/test/results/clientpositive/keyword_1.q.out index 135d8e5..a37093d 100644 --- a/ql/src/test/results/clientpositive/keyword_1.q.out +++ b/ql/src/test/results/clientpositive/keyword_1.q.out @@ -1,12 +1,12 @@ PREHOOK: query: -- SORT_BEFORE_DIFF -create table test_user (user string, `group` string) +create table test_user (`user` string, `group` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_user POSTHOOK: query: -- SORT_BEFORE_DIFF -create table test_user (user string, `group` string) +create table test_user (`user` string, `group` string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_user @@ -16,9 +16,9 @@ PREHOOK: Output: default@test_user POSTHOOK: query: grant select on table test_user to user hive_test POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@test_user -PREHOOK: query: explain select user from test_user +PREHOOK: query: explain select `user` from test_user PREHOOK: type: QUERY -POSTHOOK: query: explain select user from test_user +POSTHOOK: query: explain select `user` from test_user POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/nonreserved_keywords_input37.q.out b/ql/src/test/results/clientpositive/nonreserved_keywords_input37.q.out index 819da22..aa0dd47 100644 --- a/ql/src/test/results/clientpositive/nonreserved_keywords_input37.q.out +++ b/ql/src/test/results/clientpositive/nonreserved_keywords_input37.q.out @@ -1,38 +1,38 @@ -PREHOOK: query: CREATE TABLE table(string string) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE `table`(`string` string) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table -POSTHOOK: query: CREATE TABLE table(string string) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE `table`(`string` string) STORED AS TEXTFILE 
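-- The same quoting applies to entire table names that collide with keywords
-- (DECIMAL, table, insert in the results above and below). A sketch, assuming
-- the flag's new default:
CREATE TABLE `table`(`string` string) STORED AS TEXTFILE;
SELECT `table`.`string` FROM `table`;   -- qualified references are quoted too
DROP TABLE `table`;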
POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@table -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE table +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE `table` PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@table -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE table +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE `table` POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@table -PREHOOK: query: SELECT table, count(1) +PREHOOK: query: SELECT `table`, count(1) FROM ( - FROM table - SELECT TRANSFORM (table.string) + FROM `table` + SELECT TRANSFORM (`table`.`string`) #### A masked pattern was here #### ) subq -GROUP BY table +GROUP BY `table` PREHOOK: type: QUERY PREHOOK: Input: default@table #### A masked pattern was here #### -POSTHOOK: query: SELECT table, count(1) +POSTHOOK: query: SELECT `table`, count(1) FROM ( - FROM table - SELECT TRANSFORM (table.string) + FROM `table` + SELECT TRANSFORM (`table`.`string`) #### A masked pattern was here #### ) subq -GROUP BY table +GROUP BY `table` POSTHOOK: type: QUERY POSTHOOK: Input: default@table #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out b/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out index 9f075f1..e70673e 100644 --- a/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out +++ b/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out @@ -1,18 +1,18 @@ -PREHOOK: query: DROP TABLE insert +PREHOOK: query: DROP TABLE `insert` PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE insert +POSTHOOK: query: DROP TABLE `insert` POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE insert (key INT, as STRING) +PREHOOK: query: CREATE TABLE `insert` (key INT, `as` STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@insert -POSTHOOK: query: CREATE TABLE insert (key INT, as STRING) +POSTHOOK: query: CREATE TABLE `insert` (key INT, `as` STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@insert -PREHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 +PREHOOK: query: EXPLAIN INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 +POSTHOOK: query: EXPLAIN INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -71,32 +71,32 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 +PREHOOK: query: INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@insert -POSTHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 +POSTHOOK: query: INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@insert POSTHOOK: Lineage: insert.as SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: insert.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] PREHOOK: query: SELECT SUM(HASH(hash)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' 
AS (hash) FROM insert + SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM `insert` ) t PREHOOK: type: QUERY PREHOOK: Input: default@insert #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(HASH(hash)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM insert + SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM `insert` ) t POSTHOOK: type: QUERY POSTHOOK: Input: default@insert #### A masked pattern was here #### 10226524244 -PREHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 +PREHOOK: query: EXPLAIN INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 +POSTHOOK: query: EXPLAIN INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -155,41 +155,41 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 +PREHOOK: query: INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@insert -POSTHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 +POSTHOOK: query: INSERT INTO TABLE `insert` SELECT * FROM src LIMIT 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@insert POSTHOOK: Lineage: insert.as SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: insert.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] PREHOOK: query: SELECT SUM(HASH(sum)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM insert + SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM `insert` ) t PREHOOK: type: QUERY PREHOOK: Input: default@insert #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(HASH(sum)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM insert + SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM `insert` ) t POSTHOOK: type: QUERY POSTHOOK: Input: default@insert #### A masked pattern was here #### 20453048488 -PREHOOK: query: SELECT COUNT(*) FROM insert +PREHOOK: query: SELECT COUNT(*) FROM `insert` PREHOOK: type: QUERY PREHOOK: Input: default@insert #### A masked pattern was here #### -POSTHOOK: query: SELECT COUNT(*) FROM insert +POSTHOOK: query: SELECT COUNT(*) FROM `insert` POSTHOOK: type: QUERY POSTHOOK: Input: default@insert #### A masked pattern was here #### 200 -PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10 +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE `insert` SELECT * FROM src LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10 +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE `insert` SELECT * FROM src LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -248,34 +248,34 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10 +PREHOOK: query: INSERT OVERWRITE TABLE `insert` SELECT * FROM src LIMIT 10 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@insert -POSTHOOK: query: INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10 +POSTHOOK: query: INSERT OVERWRITE TABLE `insert` SELECT * FROM src LIMIT 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@insert POSTHOOK: Lineage: insert.as SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: 
Lineage: insert.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] PREHOOK: query: SELECT SUM(HASH(add)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM insert + SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM `insert` ) t PREHOOK: type: QUERY PREHOOK: Input: default@insert #### A masked pattern was here #### POSTHOOK: query: SELECT SUM(HASH(add)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM insert + SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM `insert` ) t POSTHOOK: type: QUERY POSTHOOK: Input: default@insert #### A masked pattern was here #### -826625916 -PREHOOK: query: DROP TABLE insert +PREHOOK: query: DROP TABLE `insert` PREHOOK: type: DROPTABLE PREHOOK: Input: default@insert PREHOOK: Output: default@insert -POSTHOOK: query: DROP TABLE insert +POSTHOOK: query: DROP TABLE `insert` POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@insert POSTHOOK: Output: default@insert diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat17.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat17.q.out index e9502e0..028a26e 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat17.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat17.q.out @@ -10,11 +10,11 @@ POSTHOOK: query: -- HIVE-5199, HIVE-5285 : CustomSerDe(1, 2, 3) are used here. DROP TABLE PW17 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE PW17(USER STRING, COMPLEXDT ARRAY) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1' +PREHOOK: query: CREATE TABLE PW17(`USER` STRING, COMPLEXDT ARRAY) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@PW17 -POSTHOOK: query: CREATE TABLE PW17(USER STRING, COMPLEXDT ARRAY) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1' +POSTHOOK: query: CREATE TABLE PW17(`USER` STRING, COMPLEXDT ARRAY) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@PW17 @@ -66,11 +66,11 @@ PREHOOK: type: DROPTABLE POSTHOOK: query: -- Test for non-parititioned table. 
DROP TABLE PW17_2 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE PW17_2(USER STRING, COMPLEXDT ARRAY) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1' +PREHOOK: query: CREATE TABLE PW17_2(`USER` STRING, COMPLEXDT ARRAY) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@PW17_2 -POSTHOOK: query: CREATE TABLE PW17_2(USER STRING, COMPLEXDT ARRAY) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1' +POSTHOOK: query: CREATE TABLE PW17_2(`USER` STRING, COMPLEXDT ARRAY) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@PW17_2 @@ -97,11 +97,11 @@ PREHOOK: query: DROP TABLE PW17_3 PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE PW17_3 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE PW17_3(USER STRING, COMPLEXDT ARRAY >) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3' +PREHOOK: query: CREATE TABLE PW17_3(`USER` STRING, COMPLEXDT ARRAY >) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@PW17_3 -POSTHOOK: query: CREATE TABLE PW17_3(USER STRING, COMPLEXDT ARRAY >) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3' +POSTHOOK: query: CREATE TABLE PW17_3(`USER` STRING, COMPLEXDT ARRAY >) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@PW17_3 @@ -151,11 +151,11 @@ PREHOOK: query: DROP TABLE PW17_4 PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE PW17_4 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE PW17_4(USER STRING, COMPLEXDT ARRAY >) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3' +PREHOOK: query: CREATE TABLE PW17_4(`USER` STRING, COMPLEXDT ARRAY >) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@PW17_4 -POSTHOOK: query: CREATE TABLE PW17_4(USER STRING, COMPLEXDT ARRAY >) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3' +POSTHOOK: query: CREATE TABLE PW17_4(`USER` STRING, COMPLEXDT ARRAY >) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe3' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@PW17_4 diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat18.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat18.q.out index 5d75ff8..6303d44 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat18.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat18.q.out @@ -12,11 +12,11 @@ POSTHOOK: query: -- HIVE-5202 : Tests for SettableUnionObjectInspectors DROP TABLE PW18 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE PW18(USER STRING, COMPLEXDT UNIONTYPE) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5' +PREHOOK: query: CREATE TABLE PW18(`USER` STRING, COMPLEXDT UNIONTYPE) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@PW18 -POSTHOOK: query: CREATE TABLE PW18(USER STRING, COMPLEXDT UNIONTYPE) PARTITIONED 
BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5' +POSTHOOK: query: CREATE TABLE PW18(`USER` STRING, COMPLEXDT UNIONTYPE) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@PW18 @@ -60,11 +60,11 @@ PREHOOK: type: DROPTABLE POSTHOOK: query: -- Test for non-parititioned table. DROP TABLE PW18_2 POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE PW18_2(USER STRING, COMPLEXDT UNIONTYPE) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5' +PREHOOK: query: CREATE TABLE PW18_2(`USER` STRING, COMPLEXDT UNIONTYPE) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@PW18_2 -POSTHOOK: query: CREATE TABLE PW18_2(USER STRING, COMPLEXDT UNIONTYPE) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5' +POSTHOOK: query: CREATE TABLE PW18_2(`USER` STRING, COMPLEXDT UNIONTYPE) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@PW18_2 diff --git a/ql/src/test/results/clientpositive/ppd_field_garbage.q.out b/ql/src/test/results/clientpositive/ppd_field_garbage.q.out index 86eca5b..1ce7a39 100644 --- a/ql/src/test/results/clientpositive/ppd_field_garbage.q.out +++ b/ql/src/test/results/clientpositive/ppd_field_garbage.q.out @@ -1,9 +1,11 @@ -PREHOOK: query: -- ppd leaves invalid expr in field expr +PREHOOK: query: -- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 +-- ppd leaves invalid expr in field expr CREATE TABLE test_issue (fileid int, infos ARRAY>, test_c STRUCT>) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_issue -POSTHOOK: query: -- ppd leaves invalid expr in field expr +POSTHOOK: query: -- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 +-- ppd leaves invalid expr in field expr CREATE TABLE test_issue (fileid int, infos ARRAY>, test_c STRUCT>) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/serde_regex.q.out b/ql/src/test/results/clientpositive/serde_regex.q.out index 19187ba..ad3af57 100644 --- a/ql/src/test/results/clientpositive/serde_regex.q.out +++ b/ql/src/test/results/clientpositive/serde_regex.q.out @@ -2,7 +2,7 @@ PREHOOK: query: EXPLAIN CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, @@ -19,7 +19,7 @@ POSTHOOK: query: EXPLAIN CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, @@ -50,7 +50,7 @@ STAGE PLANS: PREHOOK: query: CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, @@ -68,7 +68,7 @@ PREHOOK: Output: default@serde_regex POSTHOOK: query: CREATE TABLE serde_regex( host STRING, identity STRING, - user STRING, + `user` STRING, time STRING, request STRING, status STRING, diff --git a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out index 581d305..027ede7 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out @@ -26,11 
+26,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,18 +46,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -156,9 +156,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -166,9 +166,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -211,11 +211,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out index 629f7ba..2bc06d3 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out @@ -34,11 +34,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile 
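-- 'values' is likewise reserved under SQL2011, so the union_remove tests quote
-- it both where the alias is defined and where it is referenced. A sketch
-- against the same inputTbl1/outputTbl1 tables these tests create:
SELECT key, count(1) AS `values` FROM inputTbl1 GROUP BY key;
SELECT * FROM outputTbl1 ORDER BY key, `values`;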
PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -54,12 +54,12 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as `values` from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -67,12 +67,12 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as `values` from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -206,12 +206,12 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as `values` from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -220,12 +220,12 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as `values` from inputTbl1 union all select * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -269,11 +269,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out index 257d2ff..76cd688 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out @@ -34,11 +34,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table 
outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -54,12 +54,12 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as `values` from inputTbl1 union all select * FROM ( - SELECT key, 2 values from inputTbl1 + SELECT key, 2 `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -67,12 +67,12 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as `values` from inputTbl1 union all select * FROM ( - SELECT key, 2 values from inputTbl1 + SELECT key, 2 `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -192,12 +192,12 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as `values` from inputTbl1 union all select * FROM ( - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -206,12 +206,12 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, 1 as values from inputTbl1 +select key, 1 as `values` from inputTbl1 union all select * FROM ( - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -255,11 +255,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out new file mode 100644 index 0000000..81f815f --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out @@ -0,0 +1,281 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a map-only query, and the +-- other one is a map-join query), followed by select star and a file sink. +-- The union optimization is applied, and the union is removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. 
In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a map-only query, and the +-- other one is a map-join query), followed by select star and a file sink. +-- The union optimization is applied, and the union is removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as `values` from inputTbl1 +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as `values` from inputTbl1 +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-7 is a root stage + Stage-1 depends on stages: Stage-7 + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4 + Stage-3 + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5 + Stage-2 + Stage-4 + Stage-5 depends on stages: Stage-4 + +STAGE PLANS: + Stage: Stage-7 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 key (type: string) + 1 key (type: string) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Select Operator + expressions: key (type: string), UDFToString(1) (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output 
format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Map 2 + Map Operator Tree: + TableScan + alias: a + Filter Operator + predicate: key is not null (type: boolean) + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col6 + input vertices: + 1 Map 3 + Select Operator + expressions: _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Local Work: + Map Reduce Local Work + + Stage: Stage-6 + Conditional Operator + + Stage: Stage-3 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-4 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as `values` from inputTbl1 +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as `values` from inputTbl1 +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: desc formatted outputTbl1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@outputtbl1 +POSTHOOK: query: desc formatted outputTbl1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@outputtbl1 +# col_name data_type comment + +key string +values bigint + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE false + numFiles 2 + numRows -1 + rawDataSize -1 + totalSize 194 +#### A masked pattern was here #### + +# Storage 
Information +SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe +InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from outputTbl1 order by key, `values` +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, `values` +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 1 +1 11 +2 1 +2 12 +3 1 +3 13 +7 1 +7 17 +8 1 +8 1 +8 18 +8 18 +8 28 +8 28 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_13.q.out b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out new file mode 100644 index 0000000..bf25b54 --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out @@ -0,0 +1,306 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a mapred query, and the +-- other one is a map-join query), followed by select star and a file sink. +-- The union selectstar optimization should be performed, and the union should be removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a mapred query, and the +-- other one is a map-join query), followed by select star and a file sink. +-- The union selectstar optimization should be performed, and the union should be removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. 
In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, count(1) as `values` from inputTbl1 group by key +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, count(1) as `values` from inputTbl1 group by key +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-7 is a root stage + Stage-1 depends on stages: Stage-7 + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4 + Stage-3 + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5 + Stage-2 + Stage-4 + Stage-5 depends on stages: Stage-4 + +STAGE PLANS: + Stage: Stage-7 + Spark +#### A masked pattern was here #### + Vertices: + Map 4 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 key (type: string) + 1 key (type: string) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: a + Filter Operator + predicate: key is not null (type: boolean) + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col6 + input vertices: + 1 Map 4 + Select Operator + expressions: _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 
(type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Local Work: + Map Reduce Local Work + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToString(_col1) (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + + Stage: Stage-6 + Conditional Operator + + Stage: Stage-3 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-4 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, count(1) as `values` from inputTbl1 group by key +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, count(1) as `values` from inputTbl1 group by key +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: desc formatted outputTbl1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@outputtbl1 +POSTHOOK: query: desc formatted outputTbl1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@outputtbl1 +# col_name data_type comment + +key string +values bigint + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: 
MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE false + numFiles 3 + numRows -1 + rawDataSize -1 + totalSize 271 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe +InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from outputTbl1 order by key, `values` +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, `values` +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 1 +1 11 +2 1 +2 12 +3 1 +3 13 +7 1 +7 17 +8 2 +8 18 +8 18 +8 28 +8 28 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out new file mode 100644 index 0000000..8259c08 --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out @@ -0,0 +1,283 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a map-only query, and the +-- other one contains a join, which should be performed as a map-join query at runtime), +-- followed by select star and a file sink. +-- The union selectstar optimization should be performed, and the union should be removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (one of which is a map-only query, and the +-- other one contains a join, which should be performed as a map-join query at runtime), +-- followed by select star and a file sink. +-- The union selectstar optimization should be performed, and the union should be removed. + +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +-- The final file format is different from the input and intermediate file format. +-- It does not matter, whether the output is merged or not. 
In this case, merging is turned +-- on + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as `values` from inputTbl1 +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as `values` from inputTbl1 +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-7 is a root stage + Stage-1 depends on stages: Stage-7 + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4 + Stage-3 + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5 + Stage-2 + Stage-4 + Stage-5 depends on stages: Stage-4 + +STAGE PLANS: + Stage: Stage-7 + Spark +#### A masked pattern was here #### + Vertices: + Map 3 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Spark HashTable Sink Operator + keys: + 0 key (type: string) + 1 key (type: string) + Local Work: + Map Reduce Local Work + + Stage: Stage-1 + Spark +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Select Operator + expressions: key (type: string), UDFToString(1) (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Map 2 + Map Operator Tree: + TableScan + alias: a + Filter Operator + predicate: key is not null (type: boolean) + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0, _col6 + input vertices: + 1 Map 3 + Select Operator + expressions: _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(_col1) (type: bigint) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: 
org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + Local Work: + Map Reduce Local Work + + Stage: Stage-6 + Conditional Operator + + Stage: Stage-3 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + output format: org.apache.hadoop.hive.ql.io.RCFileOutputFormat + serde: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe + name: default.outputtbl1 + + Stage: Stage-2 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-4 + Spark +#### A masked pattern was here #### + Vertices: + Spark Merge File Work + Merge File Operator + Map Operator Tree: + RCFile Merge Operator + merge level: block + input format: org.apache.hadoop.hive.ql.io.RCFileInputFormat + + Stage: Stage-5 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as `values` from inputTbl1 +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT * FROM +( +select key, 1 as `values` from inputTbl1 +union all +select a.key as key, b.val as `values` +FROM inputTbl1 a join inputTbl1 b on a.key=b.key +)c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)a.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)b.FieldSchema(name:val, type:string, comment:null), ] +PREHOOK: query: desc formatted outputTbl1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@outputtbl1 +POSTHOOK: query: desc formatted outputTbl1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@outputtbl1 +# col_name data_type comment + +key string +values bigint + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE false + numFiles 2 + numRows -1 + rawDataSize -1 + totalSize 194 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe +InputFormat: org.apache.hadoop.hive.ql.io.RCFileInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.RCFileOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from outputTbl1 order by key, `values` +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, `values` +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 1 +1 11 +2 1 +2 12 +3 1 +3 13 +7 1 +7 17 +8 1 +8 1 +8 18 +8 18 +8 28 +8 28 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out 
b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out index 09cd5d3..1f1e3c8 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out @@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -52,18 +52,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -170,9 +170,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -180,9 +180,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -236,12 +236,12 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@outputtbl1 ds=1 ds=2 -PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: 
default@outputtbl1@ds=1 @@ -251,12 +251,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1 3 1 1 7 1 1 8 2 1 -PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out index 0db279f..f17b7a4 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,18 +50,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -210,9 +210,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -220,9 +220,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key + SELECT key, count(1) as `values`, '2' 
as ds from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -276,12 +276,12 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@outputtbl1 ds=1 ds=2 -PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=1 @@ -291,12 +291,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1 3 1 1 7 1 1 8 2 1 -PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out index 8f317de..9704d0f 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,18 +46,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as `values`, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as `values`, '2' as ds from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as `values`, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as `values`, '2' as ds from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -119,9 +119,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as `values`, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as `values`, '2' as ds 
from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -129,9 +129,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, 1 as values, '1' as ds from inputTbl1 + SELECT key, 1 as `values`, '1' as ds from inputTbl1 UNION ALL - SELECT key, 2 as values, '2' as ds from inputTbl1 + SELECT key, 2 as `values`, '2' as ds from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -185,12 +185,12 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@outputtbl1 ds=1 ds=2 -PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=1 @@ -201,12 +201,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1 7 1 1 8 1 1 8 1 1 -PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out index 96c8c25..4b71e77 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,18 +50,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from 
inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -168,9 +168,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -178,9 +178,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 partition (ds) SELECT * FROM ( - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds UNION ALL - SELECT key, count(1) as values, ds from inputTbl1 group by key, ds + SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -250,31 +250,31 @@ ds=13 ds=17 ds=18 ds=28 -PREHOOK: query: select * from outputTbl1 where ds = '11' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '11' order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=11 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '11' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '11' order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=11 #### A masked pattern was here #### 1 1 11 1 1 11 -PREHOOK: query: select * from outputTbl1 where ds = '18' order by key, values +PREHOOK: query: select * from outputTbl1 where ds = '18' order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=18 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds = '18' order by key, values +POSTHOOK: query: select * from outputTbl1 where ds = '18' order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=18 #### A masked pattern was here #### 8 1 18 8 1 18 -PREHOOK: query: select * from outputTbl1 where ds is not null order by key, values, ds +PREHOOK: query: select * from outputTbl1 where ds is not null order by key, `values`, ds PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=11 @@ -284,7 +284,7 @@ PREHOOK: Input: default@outputtbl1@ds=17 PREHOOK: Input: default@outputtbl1@ds=18 PREHOOK: Input: default@outputtbl1@ds=28 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 where ds is not null order by key, values, ds +POSTHOOK: query: select * from outputTbl1 where ds is not null order by key, `values`, ds POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=11 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out index 7049a91..290d11a 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as 
textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,20 +48,20 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -158,21 +158,21 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -236,21 +236,21 @@ POSTHOOK: Input: default@outputtbl1 PREHOOK: query: -- filter should be fine explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a where a.key = 7 PREHOOK: type: QUERY POSTHOOK: query: -- filter should be fine explain insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a where a.key = 7 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -365,21 +365,21 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite 
table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a where a.key = 7 PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values +SELECT a.key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a where a.key = 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -399,26 +399,26 @@ POSTHOOK: Input: default@outputtbl1 PREHOOK: query: -- filters and sub-queries should be fine explain insert overwrite table outputTbl1 -select key, values from +select key, `values` from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a ) b where b.key >= 7 PREHOOK: type: QUERY POSTHOOK: query: -- filters and sub-queries should be fine explain insert overwrite table outputTbl1 -select key, values from +select key, `values` from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a ) b where b.key >= 7 POSTHOOK: type: QUERY @@ -526,26 +526,26 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -select key, values from +select key, `values` from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a ) b where b.key >= 7 PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -select key, values from +select key, `values` from ( -SELECT a.key + a.key as key, a.values +SELECT a.key + a.key as key, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a ) b where b.key >= 7 POSTHOOK: type: QUERY @@ -553,11 +553,11 @@ POSTHOOK: Input: default@inputtbl1 POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] 
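The union_remove_19 hunks above all apply one mechanical rule: once `values` is treated as a SQL2011 reserved keyword, every use of it as an identifier must be backtick-quoted, whether in the column list of a CREATE TABLE, behind a table alias in a select list, or in a filter; the same holds for ORDER BY, as the next hunk shows. A minimal sketch of the pattern, using the same tables these tests use:

create table outputTbl1(key string, `values` bigint) stored as textfile;

insert overwrite table outputTbl1
select a.key, a.`values`
from (
  select key, count(1) as `values` from inputTbl1 group by key
) a
where a.key >= 7;

select * from outputTbl1 order by key, `values`;

Left unquoted, each of these statements is rejected by the parser once the stricter keyword handling is in effect, which is why the change fans out across so many golden files.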
-PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out index 298929d..06d4ad8 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,22 +48,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -154,11 +154,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -166,11 +166,11 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -213,11 +213,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` 
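For context on how these spark/union_remove_*.q.out plans come about: the union_remove tests exercise the union->selectstar->filesink optimization under different merge settings, and the variants the comments describe as "merging is turned on/off" differ only in those knobs. The exact settings live in the corresponding .q files; the following is a rough sketch of the kind of session preamble involved, an approximation rather than a copy of any one test:

set hive.optimize.union.remove=true;
set hive.mapred.supports.subdirectories=true;
set mapred.input.dir.recursive=true;
-- merge on for the "merging is turned on" variants, off otherwise
set hive.merge.sparkfiles=true;

With union removal in effect, each branch of the union writes straight into a sub-directory of outputTbl1, which is why the comments pin these tests to hadoop 23.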
PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out index 328b1ac..bd8f9d7 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(values bigint, key string) stored as textfile +PREHOOK: query: create table outputTbl1(`values` bigint, key string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(values bigint, key string) stored as textfile +POSTHOOK: query: create table outputTbl1(`values` bigint, key string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,20 +46,20 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -162,21 +162,21 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -219,11 +219,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * 
from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out index b160397..9379e34 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out @@ -48,18 +48,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -170,9 +170,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -180,9 +180,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_22.q.out b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out new file mode 100644 index 0000000..261d560 --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out @@ -0,0 +1,397 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 map-reduce subqueries is performed followed by select and a file sink +-- However, some columns are repeated. So, union cannot be removed. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- off +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23. The union is removed, the select (which selects columns from +-- both the sub-queries of the union) is pushed above the union. + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 map-reduce subqueries is performed followed by select and a file sink +-- However, some columns are repeated. So, union cannot be removed. +-- It does not matter, whether the output is merged or not.
In this case, merging is turned +-- off +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23. The union is removed, the select (which selects columns from +-- both the sub-queries of the union) is pushed above the union. + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, `values` bigint, values2 bigint) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, `values` bigint, values2 bigint) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT a.key, a.`values`, a.`values` +FROM ( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) a +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT a.key, a.`values`, a.`values` +FROM ( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) a +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 4 <- Map 3 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions:
_col1 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint), _col1 (type: bigint) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint), _col1 (type: bigint) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT a.key, a.`values`, a.`values` +FROM ( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT a.key, a.`values`, a.`values` +FROM ( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.values2 EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: desc formatted outputTbl1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@outputtbl1 +POSTHOOK: query: desc formatted outputTbl1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@outputtbl1 +# col_name data_type comment + +key string +values bigint +values2 bigint + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE false + numFiles 4 + numRows -1 + rawDataSize -1 + totalSize 60 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * 
from outputTbl1 +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +8 2 2 +2 1 1 +1 1 1 +3 1 1 +7 1 1 +8 2 2 +2 1 1 +1 1 1 +3 1 1 +7 1 1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) +FROM ( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) a +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) +FROM ( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) a +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 4 <- Map 3 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), UDFToLong(concat(_col1, _col1)) (type: bigint), UDFToLong(concat(_col1, _col1)) (type: bigint) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: 
string), UDFToLong(concat(_col1, _col1)) (type: bigint), UDFToLong(concat(_col1, _col1)) (type: bigint) + outputColumnNames: _col0, _col1, _col2 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) +FROM ( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) a +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) +FROM ( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) a +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl1.values2 EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: select * from outputTbl1 order by key, `values` +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, `values` +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 11 11 +1 11 11 +2 11 11 +2 11 11 +3 11 11 +3 11 11 +7 11 11 +7 11 11 +8 22 22 +8 22 22 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_23.q.out b/ql/src/test/results/clientpositive/spark/union_remove_23.q.out new file mode 100644 index 0000000..4add1d6 --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_23.q.out @@ -0,0 +1,265 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 map-reduce subqueries is performed followed by select star and a file sink +-- There is no need to write the temporary results of the sub-queries, and then read them +-- again to process the union. The union can be removed completely. One of the sub-queries +-- would have multiple map-reduce jobs. +-- It does not matter, whether the output is merged or not. 
In this case, merging is turned +-- off +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 map-reduce subqueries is performed followed by select star and a file sink +-- There is no need to write the temporary results of the sub-queries, and then read them +-- again to process the union. The union can be removed completely. One of the sub-queries +-- would have multiple map-reduce jobs. +-- It does not matter, whether the output is merged or not. In this case, merging is turned +-- off +-- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) +-- Since this test creates sub-directories for the output table outputTbl1, it might be easier +-- to run the test only on hadoop 23 + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * +FROM ( + SELECT key, count(1) as `values` from + (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) subq2 +PREHOOK: type: QUERY +POSTHOOK: query: explain +insert overwrite table outputTbl1 +SELECT * +FROM ( + SELECT key, count(1) as `values` from + (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) subq2 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2), Map 4 (PARTITION-LEVEL SORT, 2) + Reducer 6 <- Map 5 (GROUP, 2) + Reducer 3 <- Reducer 2 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 4 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Filter 
Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Map 5 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: string) + 1 key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: _col0 (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + Reducer 6 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + +PREHOOK: query: insert overwrite table outputTbl1 +SELECT * +FROM ( + SELECT key, count(1) as `values` from + (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) subq2 +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +POSTHOOK: query: insert overwrite table outputTbl1 +SELECT * +FROM ( + SELECT key, count(1) as `values` from + (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key + 
UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key +) subq2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)a.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)a.null, (inputtbl1)a.null, (inputtbl1)b.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: desc formatted outputTbl1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@outputtbl1 +POSTHOOK: query: desc formatted outputTbl1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@outputtbl1 +# col_name data_type comment + +key string +values bigint + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Protect Mode: None +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE false + numFiles 4 + numRows -1 + rawDataSize -1 + totalSize 40 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: select * from outputTbl1 order by key, `values` +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, `values` +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 1 +1 1 +2 1 +2 1 +3 1 +3 1 +7 1 +7 1 +8 2 +8 4 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out index 8bc748d..881e4ed 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out @@ -24,11 +24,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key double, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key double, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key double, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key double, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -44,18 +44,18 @@ PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM 
inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -160,9 +160,9 @@ STAGE PLANS: PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -170,9 +170,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -215,11 +215,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out index 396a43d..f32aaea 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out @@ -26,27 +26,27 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl2(key string, `values` bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl2(key string, `values` bigint) partitioned by (ds string) stored as textfile 
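Reviewer note: union_remove_25 applies the same rename to partitioned DDL. Only the data column `values` needs quoting; the partition columns ds and hr are not reserved, so they stay bare both in the CREATE TABLE and in the static partition specs of the INSERT statements. A sketch of the pattern with hypothetical names demo_part and demo_src:

create table demo_part(key string, `values` bigint) partitioned by (ds string, hr string) stored as textfile;
insert overwrite table demo_part partition (ds='2004', hr='11')   -- partition spec needs no quoting
select key, count(1) as `values` from demo_src group by key;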
POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: create table outputTbl3(key string, values bigint) partitioned by (ds string,hr string) stored as textfile +PREHOOK: query: create table outputTbl3(key string, `values` bigint) partitioned by (ds string,hr string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl3 -POSTHOOK: query: create table outputTbl3(key string, values bigint) partitioned by (ds string,hr string) stored as textfile +POSTHOOK: query: create table outputTbl3(key string, `values` bigint) partitioned by (ds string,hr string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl3 @@ -62,18 +62,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -174,9 +174,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -184,9 +184,9 @@ PREHOOK: Output: default@outputtbl1@ds=2004 POSTHOOK: query: insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -234,12 +234,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2004 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2004 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_3.q.out b/ql/src/test/results/clientpositive/spark/union_remove_3.q.out index bac5441..df17b35 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_3.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_3.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: 
database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,22 +48,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -140,11 +140,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -152,11 +152,11 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -199,11 +199,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out index e83788a..5d6dd88 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default 
PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,18 +46,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -206,9 +206,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -216,9 +216,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -261,11 +261,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out index 0deb03d..554cffc 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,22 +50,22 @@ PREHOOK: query: 
explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -206,11 +206,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -218,11 +218,11 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -265,11 +265,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_6.q.out b/ql/src/test/results/clientpositive/spark/union_remove_6.q.out index 64c252d..ea0a1a0 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_6.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_6.q.out @@ -20,19 +20,19 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl2(key string, `values` bigint) stored as 
textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl2(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 @@ -46,18 +46,18 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -181,9 +181,9 @@ STAGE PLANS: name: default.outputtbl2 PREHOOK: query: FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -192,9 +192,9 @@ PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 PREHOOK: Output: default@outputtbl2 POSTHOOK: query: FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -206,11 +206,11 @@ POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(n POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: outputtbl2.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### @@ -224,11 +224,11 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: select * from outputTbl2 order by key, values +PREHOOK: query: select * from outputTbl2 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl2 order by key, values +POSTHOOK: query: select * from outputTbl2 order by key, `values` POSTHOOK: type: 
QUERY POSTHOOK: Input: default@outputtbl2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out new file mode 100644 index 0000000..30df8e1 --- /dev/null +++ b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out @@ -0,0 +1,1177 @@ +PREHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (all of which are mapred queries) +-- followed by select star and a file sink in 2 output tables. +-- The optimization does not take effect since it is a multi-table insert. +-- It does not matter, whether the output is merged or not. In this case, +-- merging is turned off + +create table inputTbl1(key string, val string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@inputTbl1 +POSTHOOK: query: -- This is to test the union->selectstar->filesink optimization +-- Union of 2 subqueries is performed (all of which are mapred queries) +-- followed by select star and a file sink in 2 output tables. +-- The optimization does not take effect since it is a multi-table insert. +-- It does not matter, whether the output is merged or not. In this case, +-- merging is turned off + +create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@inputTbl1 +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl1 +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl1 +PREHOOK: query: create table outputTbl2(key string, `values` bigint) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@outputTbl2 +POSTHOOK: query: create table outputTbl2(key string, `values` bigint) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@outputTbl2 +PREHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@inputtbl1 +POSTHOOK: query: load data local inpath '../../data/files/T1.txt' into table inputTbl1 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@inputtbl1 +PREHOOK: query: explain +FROM ( + select * from( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key + )subq +) a +insert overwrite table outputTbl1 select * +insert overwrite table outputTbl2 select * +PREHOOK: type: QUERY +POSTHOOK: query: explain +FROM ( + select * from( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key + )subq +) a +insert overwrite table outputTbl1 select * +insert overwrite table outputTbl2 select * +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-2 is a root stage + Stage-0 depends on stages: Stage-2 + Stage-1 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-2 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 4 <- Map 3 (GROUP, 2) +#### A masked pattern was here #### + Vertices: + Map 1 +
Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 3 + Map Operator Tree: + TableScan + alias: inputtbl1 + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl2 + Reducer 4 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl2 + + Stage: Stage-0 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl1 + + Stage: Stage-1 + Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl2 + +PREHOOK: query: FROM ( + select * from( + 
SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key + )subq +) a +insert overwrite table outputTbl1 select * +insert overwrite table outputTbl2 select * +PREHOOK: type: QUERY +PREHOOK: Input: default@inputtbl1 +PREHOOK: Output: default@outputtbl1 +PREHOOK: Output: default@outputtbl2 +POSTHOOK: query: FROM ( + select * from( + SELECT key, count(1) as `values` from inputTbl1 group by key + UNION ALL + SELECT key, count(1) as `values` from inputTbl1 group by key + )subq +) a +insert overwrite table outputTbl1 select * +insert overwrite table outputTbl2 select * +POSTHOOK: type: QUERY +POSTHOOK: Input: default@inputtbl1 +POSTHOOK: Output: default@outputtbl1 +POSTHOOK: Output: default@outputtbl2 +POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] +POSTHOOK: Lineage: outputtbl2.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] +PREHOOK: query: select * from outputTbl1 order by key, `values` +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl1 order by key, `values` +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl1 +#### A masked pattern was here #### +1 1 +1 1 +2 1 +2 1 +3 1 +3 1 +7 1 +7 1 +8 2 +8 2 +PREHOOK: query: select * from outputTbl2 order by key, `values` +PREHOOK: type: QUERY +PREHOOK: Input: default@outputtbl2 +#### A masked pattern was here #### +POSTHOOK: query: select * from outputTbl2 order by key, `values` +POSTHOOK: type: QUERY +POSTHOOK: Input: default@outputtbl2 +#### A masked pattern was here #### +1 1 +1 1 +2 1 +2 1 +3 1 +3 1 +7 1 +7 1 +8 2 +8 2 +PREHOOK: query: -- The following queries guarantee the correctness. +explain +select avg(c) from( + SELECT count(1)-200 as c from src + UNION ALL + SELECT count(1) as c from src +)subq +PREHOOK: type: QUERY +POSTHOOK: query: -- The following queries guarantee the correctness. 
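Reviewer note: the new golden value further below is easy to sanity-check. The plan statistics show src with 500 rows, so the two branches of the union produce c = 500 - 200 = 300 and c = 500, and avg(c) = (300 + 500) / 2 = 400.0, which is exactly the expected output.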
+explain +select avg(c) from( + SELECT count(1)-200 as c from src + UNION ALL + SELECT count(1) as c from src +)subq +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 1) + Reducer 5 <- Map 4 (GROUP, 1) + Reducer 3 <- Reducer 2 (GROUP, 1), Reducer 5 (GROUP, 1) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Map 4 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(1) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Select Operator + expressions: (_col0 - 200) (type: bigint) + outputColumnNames: _col0 + Group By Operator + aggregations: avg(_col0) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: struct) + Reducer 3 + Reduce Operator Tree: + Group By Operator + aggregations: avg(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 5 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Group By Operator + aggregations: avg(_col0) + mode: hash + outputColumnNames: _col0 + Reduce Output Operator + sort order: + value expressions: _col0 (type: struct) + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select avg(c) from( + SELECT count(1)-200 as c from src + UNION ALL + SELECT count(1) as c from src +)subq +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select avg(c) from( + SELECT count(1)-200 as c from src + UNION ALL + SELECT count(1) as c from src +)subq +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +400.0 +PREHOOK: query: explain +select key, avg(c) over w from( + SELECT key, count(1)*2 as c from src group by key + UNION ALL + SELECT key, count(1) as c from src group by key 
+)subq group by key, c +WINDOW w AS (PARTITION BY key ORDER BY c ROWS UNBOUNDED PRECEDING) +PREHOOK: type: QUERY +POSTHOOK: query: explain +select key, avg(c) over w from( + SELECT key, count(1)*2 as c from src group by key + UNION ALL + SELECT key, count(1) as c from src group by key +)subq group by key, c +WINDOW w AS (PARTITION BY key ORDER BY c ROWS UNBOUNDED PRECEDING) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Spark + Edges: + Reducer 2 <- Map 1 (GROUP, 2) + Reducer 6 <- Map 5 (GROUP, 2) + Reducer 3 <- Reducer 2 (GROUP, 2), Reducer 6 (GROUP, 2) + Reducer 4 <- Reducer 3 (PARTITION-LEVEL SORT, 2) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Map 5 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(1) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Reducer 2 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Select Operator + expressions: _col0 (type: string), (_col1 * 2) (type: bigint) + outputColumnNames: _col0, _col1 + Group By Operator + keys: _col0 (type: string), _col1 (type: bigint) + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Reducer 4 + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: bigint) + outputColumnNames: _col0, _col1 + 
Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + PTF Operator + Function definitions: + Input definition + input alias: ptf_0 + output shape: _col0: string, _col1: bigint + type: WINDOWING + Windowing table definition + input alias: ptf_1 + name: windowingtablefunction + order by: _col1 + partition by: _col0 + raw input shape: + window functions: + window function definition + alias: _wcol0 + arguments: _col1 + name: avg + window function: GenericUDAFAverageEvaluatorDouble + window frame: PRECEDING(MAX)~ + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _wcol0 (type: double) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 6 + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Group By Operator + keys: _col0 (type: string), _col1 (type: bigint) + mode: hash + outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select key, avg(c) over w from( + SELECT key, count(1)*2 as c from src group by key + UNION ALL + SELECT key, count(1) as c from src group by key +)subq group by key, c +WINDOW w AS (PARTITION BY key ORDER BY c ROWS UNBOUNDED PRECEDING) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select key, avg(c) over w from( + SELECT key, count(1)*2 as c from src group by key + UNION ALL + SELECT key, count(1) as c from src group by key +)subq group by key, c +WINDOW w AS (PARTITION BY key ORDER BY c ROWS UNBOUNDED PRECEDING) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 3.0 +0 4.5 +103 2.0 +103 3.0 +105 1.0 +105 1.5 +11 1.0 +11 1.5 +114 1.0 +114 1.5 +116 1.0 +116 1.5 +118 2.0 +118 3.0 +125 2.0 +125 3.0 +129 2.0 +129 3.0 +134 2.0 +134 3.0 +136 1.0 +136 1.5 +138 4.0 +138 6.0 +143 1.0 +143 1.5 +145 1.0 +145 1.5 +149 2.0 +149 3.0 +15 2.0 +15 3.0 +150 1.0 +150 1.5 +152 2.0 +152 3.0 +156 1.0 +156 1.5 +158 1.0 +158 1.5 +163 1.0 +163 1.5 +165 2.0 +165 3.0 +167 3.0 +167 4.5 +169 4.0 +169 6.0 +17 1.0 +17 1.5 +170 1.0 +170 1.5 +172 2.0 +172 3.0 +174 2.0 +174 3.0 +176 2.0 +176 3.0 +178 1.0 +178 1.5 +181 1.0 +181 1.5 +183 1.0 +183 1.5 +187 3.0 +187 4.5 +189 1.0 +189 1.5 +19 1.0 +19 1.5 +190 1.0 +190 1.5 +192 1.0 +192 1.5 +194 1.0 +194 1.5 +196 1.0 +196 1.5 +2 1.0 +2 1.5 +20 1.0 +20 1.5 +200 2.0 +200 3.0 +202 1.0 +202 1.5 +208 3.0 +208 4.5 +213 2.0 +213 3.0 +217 2.0 +217 3.0 +219 2.0 +219 3.0 +222 1.0 +222 1.5 +224 2.0 +224 3.0 +226 1.0 +226 1.5 +228 1.0 +228 1.5 +233 2.0 +233 3.0 +235 1.0 +235 1.5 +237 2.0 +237 3.0 +239 2.0 +239 3.0 +24 2.0 +24 3.0 +242 2.0 +242 3.0 +244 1.0 +244 1.5 +248 1.0 +248 1.5 +255 2.0 +255 3.0 +257 1.0 +257 1.5 +26 2.0 +26 3.0 +260 1.0 +260 1.5 +262 1.0 +262 1.5 +266 1.0 +266 1.5 +273 
3.0 +273 4.5 +275 1.0 +275 1.5 +277 4.0 +277 6.0 +28 1.0 +28 1.5 +280 2.0 +280 3.0 +282 2.0 +282 3.0 +284 1.0 +284 1.5 +286 1.0 +286 1.5 +288 2.0 +288 3.0 +291 1.0 +291 1.5 +305 1.0 +305 1.5 +307 2.0 +307 3.0 +309 2.0 +309 3.0 +310 1.0 +310 1.5 +316 3.0 +316 4.5 +318 3.0 +318 4.5 +321 2.0 +321 3.0 +323 1.0 +323 1.5 +325 2.0 +325 3.0 +327 3.0 +327 4.5 +33 1.0 +33 1.5 +332 1.0 +332 1.5 +336 1.0 +336 1.5 +338 1.0 +338 1.5 +341 1.0 +341 1.5 +345 1.0 +345 1.5 +35 3.0 +35 4.5 +356 1.0 +356 1.5 +365 1.0 +365 1.5 +367 2.0 +367 3.0 +369 3.0 +369 4.5 +37 2.0 +37 3.0 +374 1.0 +374 1.5 +378 1.0 +378 1.5 +389 1.0 +389 1.5 +392 1.0 +392 1.5 +394 1.0 +394 1.5 +396 3.0 +396 4.5 +4 1.0 +4 1.5 +400 1.0 +400 1.5 +402 1.0 +402 1.5 +404 2.0 +404 3.0 +406 4.0 +406 6.0 +411 1.0 +411 1.5 +413 2.0 +413 3.0 +417 3.0 +417 4.5 +419 1.0 +419 1.5 +42 2.0 +42 3.0 +424 2.0 +424 3.0 +431 3.0 +431 4.5 +435 1.0 +435 1.5 +437 1.0 +437 1.5 +439 2.0 +439 3.0 +44 1.0 +44 1.5 +444 1.0 +444 1.5 +446 1.0 +446 1.5 +448 1.0 +448 1.5 +453 1.0 +453 1.5 +455 1.0 +455 1.5 +457 1.0 +457 1.5 +459 2.0 +459 3.0 +460 1.0 +460 1.5 +462 2.0 +462 3.0 +466 3.0 +466 4.5 +468 4.0 +468 6.0 +475 1.0 +475 1.5 +477 1.0 +477 1.5 +479 1.0 +479 1.5 +480 3.0 +480 4.5 +482 1.0 +482 1.5 +484 1.0 +484 1.5 +491 1.0 +491 1.5 +493 1.0 +493 1.5 +495 1.0 +495 1.5 +497 1.0 +497 1.5 +51 2.0 +51 3.0 +53 1.0 +53 1.5 +57 1.0 +57 1.5 +64 1.0 +64 1.5 +66 1.0 +66 1.5 +77 1.0 +77 1.5 +8 1.0 +8 1.5 +80 1.0 +80 1.5 +82 1.0 +82 1.5 +84 2.0 +84 3.0 +86 1.0 +86 1.5 +95 2.0 +95 3.0 +97 2.0 +97 3.0 +10 1.0 +10 1.5 +100 2.0 +100 3.0 +104 2.0 +104 3.0 +111 1.0 +111 1.5 +113 2.0 +113 3.0 +119 3.0 +119 4.5 +12 2.0 +12 3.0 +120 2.0 +120 3.0 +126 1.0 +126 1.5 +128 3.0 +128 4.5 +131 1.0 +131 1.5 +133 1.0 +133 1.5 +137 2.0 +137 3.0 +146 2.0 +146 3.0 +153 1.0 +153 1.5 +155 1.0 +155 1.5 +157 1.0 +157 1.5 +160 1.0 +160 1.5 +162 1.0 +162 1.5 +164 2.0 +164 3.0 +166 1.0 +166 1.5 +168 1.0 +168 1.5 +175 2.0 +175 3.0 +177 1.0 +177 1.5 +179 2.0 +179 3.0 +18 2.0 +18 3.0 +180 1.0 +180 1.5 +186 1.0 +186 1.5 +191 2.0 +191 3.0 +193 3.0 +193 4.5 +195 2.0 +195 3.0 +197 2.0 +197 3.0 +199 3.0 +199 4.5 +201 1.0 +201 1.5 +203 2.0 +203 3.0 +205 2.0 +205 3.0 +207 2.0 +207 3.0 +209 2.0 +209 3.0 +214 1.0 +214 1.5 +216 2.0 +216 3.0 +218 1.0 +218 1.5 +221 2.0 +221 3.0 +223 2.0 +223 3.0 +229 2.0 +229 3.0 +230 5.0 +230 7.5 +238 2.0 +238 3.0 +241 1.0 +241 1.5 +247 1.0 +247 1.5 +249 1.0 +249 1.5 +252 1.0 +252 1.5 +256 2.0 +256 3.0 +258 1.0 +258 1.5 +263 1.0 +263 1.5 +265 2.0 +265 3.0 +27 1.0 +27 1.5 +272 2.0 +272 3.0 +274 1.0 +274 1.5 +278 2.0 +278 3.0 +281 2.0 +281 3.0 +283 1.0 +283 1.5 +285 1.0 +285 1.5 +287 1.0 +287 1.5 +289 1.0 +289 1.5 +292 1.0 +292 1.5 +296 1.0 +296 1.5 +298 3.0 +298 4.5 +30 1.0 +30 1.5 +302 1.0 +302 1.5 +306 1.0 +306 1.5 +308 1.0 +308 1.5 +311 3.0 +311 4.5 +315 1.0 +315 1.5 +317 2.0 +317 3.0 +322 2.0 +322 3.0 +331 2.0 +331 3.0 +333 2.0 +333 3.0 +335 1.0 +335 1.5 +339 1.0 +339 1.5 +34 1.0 +34 1.5 +342 2.0 +342 3.0 +344 2.0 +344 3.0 +348 5.0 +348 7.5 +351 1.0 +351 1.5 +353 2.0 +353 3.0 +360 1.0 +360 1.5 +362 1.0 +362 1.5 +364 1.0 +364 1.5 +366 1.0 +366 1.5 +368 1.0 +368 1.5 +373 1.0 +373 1.5 +375 1.0 +375 1.5 +377 1.0 +377 1.5 +379 1.0 +379 1.5 +382 2.0 +382 3.0 +384 3.0 +384 4.5 +386 1.0 +386 1.5 +393 1.0 +393 1.5 +395 2.0 +395 3.0 +397 2.0 +397 3.0 +399 2.0 +399 3.0 +401 5.0 +401 7.5 +403 3.0 +403 4.5 +407 1.0 +407 1.5 +409 3.0 +409 4.5 +41 1.0 +41 1.5 +414 2.0 +414 3.0 +418 1.0 +418 1.5 +421 1.0 +421 1.5 +427 1.0 +427 1.5 +429 2.0 +429 3.0 +43 1.0 +43 1.5 +430 3.0 +430 4.5 +432 1.0 +432 1.5 
+436 1.0 +436 1.5 +438 3.0 +438 4.5 +443 1.0 +443 1.5 +449 1.0 +449 1.5 +452 1.0 +452 1.5 +454 3.0 +454 4.5 +458 2.0 +458 3.0 +463 2.0 +463 3.0 +467 1.0 +467 1.5 +469 5.0 +469 7.5 +47 1.0 +47 1.5 +470 1.0 +470 1.5 +472 1.0 +472 1.5 +478 2.0 +478 3.0 +481 1.0 +481 1.5 +483 1.0 +483 1.5 +485 1.0 +485 1.5 +487 1.0 +487 1.5 +489 4.0 +489 6.0 +490 1.0 +490 1.5 +492 2.0 +492 3.0 +494 1.0 +494 1.5 +496 1.0 +496 1.5 +498 3.0 +498 4.5 +5 3.0 +5 4.5 +54 1.0 +54 1.5 +58 2.0 +58 3.0 +65 1.0 +65 1.5 +67 2.0 +67 3.0 +69 1.0 +69 1.5 +70 3.0 +70 4.5 +72 2.0 +72 3.0 +74 1.0 +74 1.5 +76 2.0 +76 3.0 +78 1.0 +78 1.5 +83 2.0 +83 3.0 +85 1.0 +85 1.5 +87 1.0 +87 1.5 +9 1.0 +9 1.5 +90 3.0 +90 4.5 +92 1.0 +92 1.5 +96 1.0 +96 1.5 +98 2.0 +98 3.0 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out index 2d350a3..d8f4c03 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,18 +50,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -160,9 +160,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -170,9 +170,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -215,11 +215,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: 
query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out index e6ab825..994552e 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out @@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -52,22 +52,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -158,11 +158,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -170,11 +170,11 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -217,11 +217,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here 
#### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out index 1eef57a..a16ca19 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,12 +50,12 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -63,12 +63,12 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -208,12 +208,12 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -222,12 +222,12 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -271,11 +271,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` 
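The union_remove_*.q.out churn above is purely syntactic: `values` is reserved under SQL:2011, so the regenerated golden files back-quote it wherever it names a column or an alias, and the query results are unchanged. A minimal HiveQL sketch of the pattern, using hypothetical tables demo_in and demo_out in place of the test tables:

-- `values` must be back-quoted in DDL, as a select alias, and in ORDER BY
create table demo_out(key string, `values` bigint) stored as rcfile;  -- hypothetical table
insert overwrite table demo_out
select key, count(1) as `values` from demo_in group by key;
select * from demo_out order by key, `values`;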
POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out index 21e0876..08f28e5 100644 --- a/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out @@ -34,9 +34,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 11 12 -PREHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds +PREHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds PREHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds +POSTHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds POSTHOOK: type: CREATETABLE_AS_SELECT STAGE DEPENDENCIES: Stage-1 is a root stage @@ -112,7 +112,7 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: create table srcpart_date as select ds as ds, ds as date from srcpart group by ds +PREHOOK: query: create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -121,7 +121,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Output: database:default PREHOOK: Output: default@srcpart_date -POSTHOOK: query: create table srcpart_date as select ds as ds, ds as date from srcpart group by ds +POSTHOOK: query: create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -148,7 +148,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_hour -PREHOOK: query: create table srcpart_date_hour as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr +PREHOOK: query: create table srcpart_date_hour as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -157,7 +157,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Output: database:default PREHOOK: Output: default@srcpart_date_hour -POSTHOOK: query: create table srcpart_date_hour as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr +POSTHOOK: query: create table srcpart_date_hour as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -185,10 +185,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_double_hour PREHOOK: query: -- single column, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +EXPLAIN select 
count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: -- single column, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -281,7 +281,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -290,7 +290,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Input: default@srcpart_date #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -300,9 +300,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Input: default@srcpart_date #### A masked pattern was here #### 1000 -PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -380,7 +380,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -389,7 +389,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Input: default@srcpart_date #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -414,11 +414,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 1000 PREHOOK: query: -- multiple sources, single key EXPLAIN select count(*) from srcpart join 
srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 PREHOOK: type: QUERY POSTHOOK: query: -- multiple sources, single key EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -557,7 +557,7 @@ STAGE PLANS: ListSink PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -568,7 +568,7 @@ PREHOOK: Input: default@srcpart_date PREHOOK: Input: default@srcpart_hour #### A masked pattern was here #### POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -580,10 +580,10 @@ POSTHOOK: Input: default@srcpart_hour #### A masked pattern was here #### 500 PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -693,7 +693,7 @@ STAGE PLANS: ListSink PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -704,7 +704,7 @@ PREHOOK: Input: default@srcpart_date PREHOOK: Input: default@srcpart_hour #### A masked pattern was here #### POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -727,10 +727,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 #### A masked pattern was here #### 500 PREHOOK: query: -- multiple columns single source -EXPLAIN select count(*) from srcpart join 
srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 PREHOOK: type: QUERY POSTHOOK: query: -- multiple columns single source -EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -837,7 +837,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -846,7 +846,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Input: default@srcpart_date_hour #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -856,9 +856,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Input: default@srcpart_date_hour #### A masked pattern was here #### 500 -PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -936,7 +936,7 @@ STAGE PLANS: Processor Tree: ListSink 
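The dynamic_partition_pruning.q.out hunks apply the same rule to `date`: both the CTAS alias and every later reference to the column must be back-quoted, while non-reserved identifiers such as hour stay bare. Condensed from the queries in this file (a sketch, not the full test):

-- the alias introduces a reserved-word column, so it is quoted at creation and at use
create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds;
select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds)
where srcpart_date.`date` = '2008-04-08';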
-PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -945,7 +945,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Input: default@srcpart_date_hour #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -967,10 +967,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 #### A masked pattern was here #### 500 PREHOOK: query: -- empty set -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' PREHOOK: type: QUERY POSTHOOK: query: -- empty set -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1063,7 +1063,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -1072,7 +1072,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Input: default@srcpart_date #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -1082,9 +1082,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Input: default@srcpart_date #### A masked pattern was here #### 0 -PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where 
srcpart_date.`date` = 'I DONT EXIST' PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1162,7 +1162,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -1171,7 +1171,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Input: default@srcpart_date #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST' +POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -1775,10 +1775,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 1000 Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: -- parent is reduce tasks -EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: -- parent is reduce tasks -EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1876,13 +1876,13 @@ STAGE PLANS: ListSink Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product -PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -1904,10 +1904,10 @@ 
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 1000 Warning: Shuffle Join MERGEJOIN[13][tables = [srcpart, srcpart_date_hour]] in Stage 'Reducer 2' is a cross product PREHOOK: query: -- non-equi join -EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) PREHOOK: type: QUERY POSTHOOK: query: -- non-equi join -EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1989,7 +1989,7 @@ STAGE PLANS: ListSink Warning: Shuffle Join MERGEJOIN[13][tables = [srcpart, srcpart_date_hour]] in Stage 'Reducer 2' is a cross product -PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -1998,7 +1998,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Input: default@srcpart_date_hour #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -2009,10 +2009,10 @@ POSTHOOK: Input: default@srcpart_date_hour #### A masked pattern was here #### 1500 PREHOOK: query: -- old style join syntax -EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr PREHOOK: type: QUERY POSTHOOK: query: -- old style join syntax -EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +EXPLAIN select count(*) from srcpart, srcpart_date_hour where 
srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2125,7 +2125,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -2134,7 +2134,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Input: default@srcpart_date_hour #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -2145,10 +2145,10 @@ POSTHOOK: Input: default@srcpart_date_hour #### A masked pattern was here #### 500 PREHOOK: query: -- left join -EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: -- left join -EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2228,9 +2228,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2323,10 +2323,10 @@ STAGE PLANS: ListSink PREHOOK: query: -- full outer -EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: -- full outer -EXPLAIN select count(*) from 
srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2408,11 +2408,11 @@ STAGE PLANS:
PREHOOK: query: -- with static pruning
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- with static pruning
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2550,7 +2550,7 @@ STAGE PLANS:
ListSink
PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -2559,7 +2559,7 @@ PREHOOK: Input: default@srcpart_date
PREHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -2569,10 +2569,10 @@ POSTHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
500
PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2670,14 +2670,14 @@ STAGE PLANS:
ListSink
PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart_date
PREHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart_date
@@ -3299,10 +3299,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
2008-04-09
2008-04-09
PREHOOK: query: -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
POSTHOOK: query: -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -3390,7 +3390,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3399,7 +3399,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3424,11 +3424,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
1000
PREHOOK: query: -- multiple sources, single key
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- multiple sources, single key
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -3555,7 +3555,7 @@ STAGE PLANS:
ListSink
PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3566,7 +3566,7 @@ PREHOOK: Input: default@srcpart_date
PREHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3589,10 +3589,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
#### A masked pattern was here ####
500
PREHOOK: query: -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -3694,7 +3694,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3703,7 +3703,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date_hour
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3725,10 +3725,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
#### A masked pattern was here ####
500
PREHOOK: query: -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
PREHOOK: type: QUERY
POSTHOOK: query: -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -3817,13 +3817,13 @@ STAGE PLANS:
ListSink
PREHOOK: query: -- Disabled until TEZ-1486 is fixed
--- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
-- expressions
EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- Disabled until TEZ-1486 is fixed
--- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
-- expressions
EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
@@ -4063,10 +4063,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
1000
Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Map 1' is a cross product
PREHOOK: query: -- parent is reduce tasks
-EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
PREHOOK: type: QUERY
POSTHOOK: query: -- parent is reduce tasks
-EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -4161,13 +4161,13 @@ STAGE PLANS:
ListSink
Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Map 1' is a cross product
-PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
+PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
+POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -4188,10 +4188,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
#### A masked pattern was here ####
1000
PREHOOK: query: -- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
POSTHOOK: query: -- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -4266,9 +4266,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -4341,10 +4341,10 @@ STAGE PLANS:
ListSink
PREHOOK: query: -- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
POSTHOOK: query: -- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -4426,11 +4426,11 @@ STAGE PLANS:
PREHOOK: query: -- with static pruning
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- with static pruning
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -4557,7 +4557,7 @@ STAGE PLANS:
ListSink
PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -4566,7 +4566,7 @@ PREHOOK: Input: default@srcpart_date
PREHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -4576,10 +4576,10 @@ POSTHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
500
PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -4659,14 +4659,14 @@ STAGE PLANS:
PREHOOK: query: -- Disabled until TEZ-1486 is fixed
-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
--- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+-- where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
-- union + subquery
EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
PREHOOK: type: QUERY
POSTHOOK: query: -- Disabled until TEZ-1486 is fixed
-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
--- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13;
+-- where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13;
-- union + subquery
EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart)
@@ -4885,9 +4885,9 @@ POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=11).key EXPRESSION [(s
POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ]
POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ]
-PREHOOK: query: EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09')
+PREHOOK: query: EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09')
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09')
+POSTHOOK: query: EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09')
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -4993,7 +4993,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09')
+PREHOOK: query: select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09')
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart_date_hour
PREHOOK: Input: default@srcpart_orc
@@ -5002,7 +5002,7 @@ PREHOOK: Input: default@srcpart_orc@ds=2008-04-08/hr=12
PREHOOK: Input: default@srcpart_orc@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart_orc@ds=2008-04-09/hr=12
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09')
+POSTHOOK: query: select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09')
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart_date_hour
POSTHOOK: Input: default@srcpart_orc
diff --git a/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out b/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out
index 32c961d..844ba23 100644
--- a/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_union_group_by.q.out
@@ -4,7 +4,7 @@ u bigint,
t string,
st string
)
-PARTITIONED BY (date string)
+PARTITIONED BY (`date` string)
STORED AS ORC
TBLPROPERTIES ("orc.compress"="ZLIB")
PREHOOK: type: CREATETABLE
@@ -16,7 +16,7 @@ u bigint,
t string,
st string
)
-PARTITIONED BY (date string)
+PARTITIONED BY (`date` string)
STORED AS ORC
TBLPROPERTIES ("orc.compress"="ZLIB")
POSTHOOK: type: CREATETABLE
@@ -26,7 +26,7 @@ PREHOOK: query: CREATE TABLE y
(
u bigint
)
-PARTITIONED BY (date string)
+PARTITIONED BY (`date` string)
STORED AS ORC
TBLPROPERTIES ("orc.compress"="ZLIB")
PREHOOK: type: CREATETABLE
@@ -36,7 +36,7 @@ POSTHOOK: query: CREATE TABLE y
(
u bigint
)
-PARTITIONED BY (date string)
+PARTITIONED BY (`date` string)
STORED AS ORC
TBLPROPERTIES ("orc.compress"="ZLIB")
POSTHOOK: type: CREATETABLE
@@ -46,7 +46,7 @@ PREHOOK: query: CREATE TABLE z
(
u bigint
)
-PARTITIONED BY (date string)
+PARTITIONED BY (`date` string)
STORED AS ORC
TBLPROPERTIES ("orc.compress"="ZLIB")
PREHOOK: type: CREATETABLE
@@ -56,7 +56,7 @@ POSTHOOK: query: CREATE TABLE z
(
u bigint
)
-PARTITIONED BY (date string)
+PARTITIONED BY (`date` string)
STORED AS ORC
TBLPROPERTIES ("orc.compress"="ZLIB")
POSTHOOK: type: CREATETABLE
@@ -88,14 +88,14 @@ PREHOOK: query: EXPLAIN
SELECT o.u, n.u
FROM
(
-SELECT m.u, Min(date) as ft
+SELECT m.u, Min(`date`) as ft
FROM
(
-SELECT u, date FROM x WHERE date < '2014-09-02'
+SELECT u, `date` FROM x WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, date FROM y WHERE date < '2014-09-02'
+SELECT u, `date` FROM y WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, date FROM z WHERE date < '2014-09-02'
+SELECT u, `date` FROM z WHERE `date` < '2014-09-02'
) m
GROUP BY m.u
) n
@@ -105,7 +105,7 @@ SELECT x.u
FROM x
JOIN v
ON (x.t = v.t AND x.st <=> v.st)
-WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03'
+WHERE x.`date` >= '2014-03-04' AND x.`date` < '2014-09-03'
GROUP BY x.u
) o
ON n.u = o.u
@@ -115,14 +115,14 @@ POSTHOOK: query: EXPLAIN
SELECT o.u, n.u
FROM
(
-SELECT m.u, Min(date) as ft
+SELECT m.u, Min(`date`) as ft
FROM
(
-SELECT u, date FROM x WHERE date < '2014-09-02'
+SELECT u, `date` FROM x WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, date FROM y WHERE date < '2014-09-02'
+SELECT u, `date` FROM y WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, date FROM z WHERE date < '2014-09-02'
+SELECT u, `date` FROM z WHERE `date` < '2014-09-02'
) m
GROUP BY m.u
) n
@@ -132,7 +132,7 @@ SELECT x.u
FROM x
JOIN v
ON (x.t = v.t AND x.st <=> v.st)
-WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03'
+WHERE x.`date` >= '2014-03-04' AND x.`date` < '2014-09-03'
GROUP BY x.u
) o
ON n.u = o.u
@@ -258,14 +258,14 @@ STAGE PLANS:
PREHOOK: query: SELECT o.u, n.u
FROM
(
-SELECT m.u, Min(date) as ft
+SELECT m.u, Min(`date`) as ft
FROM
(
-SELECT u, date FROM x WHERE date < '2014-09-02'
+SELECT u, `date` FROM x WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, date FROM y WHERE date < '2014-09-02'
+SELECT u, `date` FROM y WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, date FROM z WHERE date < '2014-09-02'
+SELECT u, `date` FROM z WHERE `date` < '2014-09-02'
) m
GROUP BY m.u
) n
@@ -275,7 +275,7 @@ SELECT x.u
FROM x
JOIN v
ON (x.t = v.t AND x.st <=> v.st)
-WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03'
+WHERE x.`date` >= '2014-03-04' AND x.`date` < '2014-09-03'
GROUP BY x.u
) o
ON n.u = o.u
@@ -289,14 +289,14 @@ PREHOOK: Input: default@z
POSTHOOK: query: SELECT o.u, n.u
FROM
(
-SELECT m.u, Min(date) as ft
+SELECT m.u, Min(`date`) as ft
FROM
(
-SELECT u, date FROM x WHERE date < '2014-09-02'
+SELECT u, `date` FROM x WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, date FROM y WHERE date < '2014-09-02'
+SELECT u, `date` FROM y WHERE `date` < '2014-09-02'
UNION ALL
-SELECT u, date FROM z WHERE date < '2014-09-02'
+SELECT u, `date` FROM z WHERE `date` < '2014-09-02'
) m
GROUP BY m.u
) n
@@ -306,7 +306,7 @@ SELECT x.u
FROM x
JOIN v
ON (x.t = v.t AND x.st <=> v.st)
-WHERE x.date >= '2014-03-04' AND x.date < '2014-09-03'
+WHERE x.`date` >= '2014-03-04' AND x.`date` < '2014-09-03'
GROUP BY x.u
) o
ON n.u = o.u
diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out b/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out
index 6f2b221..a682a8e 100644
--- a/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out
+++ b/ql/src/test/results/clientpositive/tez/vector_decimal_10_0.q.out
@@ -2,9 +2,9 @@ PREHOOK: query: DROP TABLE IF EXISTS decimal_txt
PREHOOK: type: DROPTABLE
POSTHOOK: query: DROP TABLE IF EXISTS decimal_txt
POSTHOOK: type: DROPTABLE
-PREHOOK: query: DROP TABLE IF EXISTS decimal
+PREHOOK: query: DROP TABLE IF EXISTS `decimal`
PREHOOK: type: DROPTABLE
-POSTHOOK: query: DROP TABLE IF EXISTS decimal
+POSTHOOK: query: DROP TABLE IF EXISTS `decimal`
POSTHOOK: type: DROPTABLE
PREHOOK: query: CREATE TABLE decimal_txt (dec decimal)
PREHOOK: type: CREATETABLE
@@ -22,21 +22,21 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVER
POSTHOOK: type: LOAD
#### A masked pattern was here ####
POSTHOOK: Output: default@decimal_txt
-PREHOOK: query: CREATE TABLE DECIMAL STORED AS ORC AS SELECT * FROM decimal_txt
+PREHOOK: query: CREATE TABLE `DECIMAL` STORED AS ORC AS SELECT * FROM decimal_txt
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@decimal_txt
PREHOOK: Output: database:default
PREHOOK: Output: default@DECIMAL
-POSTHOOK: query: CREATE TABLE DECIMAL STORED AS ORC AS SELECT * FROM decimal_txt
+POSTHOOK: query: CREATE TABLE `DECIMAL` STORED AS ORC AS SELECT * FROM decimal_txt
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@decimal_txt
POSTHOOK: Output: database:default
POSTHOOK: Output: default@DECIMAL
PREHOOK: query: EXPLAIN
-SELECT dec FROM DECIMAL order by dec
+SELECT dec FROM `DECIMAL` order by dec
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN
-SELECT dec FROM DECIMAL order by dec
+SELECT dec FROM `DECIMAL` order by dec
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -84,11 +84,11 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: SELECT dec FROM DECIMAL order by dec
+PREHOOK: query: SELECT dec FROM `DECIMAL` order by dec
PREHOOK: type: QUERY
PREHOOK: Input: default@decimal
#### A masked pattern was here ####
-POSTHOOK: query: SELECT dec FROM DECIMAL order by dec
+POSTHOOK: query: SELECT dec FROM `DECIMAL` order by dec
POSTHOOK: type: QUERY
POSTHOOK: Input: default@decimal
#### A masked pattern was here ####
@@ -102,11 +102,11 @@ POSTHOOK: query: DROP TABLE DECIMAL_txt
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@decimal_txt
POSTHOOK: Output: default@decimal_txt
-PREHOOK: query: DROP TABLE DECIMAL
+PREHOOK: query: DROP TABLE `DECIMAL`
PREHOOK: type: DROPTABLE
PREHOOK: Input: default@decimal
PREHOOK: Output: default@decimal
-POSTHOOK: query: DROP TABLE DECIMAL
+POSTHOOK: query: DROP TABLE `DECIMAL`
POSTHOOK: type: DROPTABLE
POSTHOOK: Input: default@decimal
POSTHOOK: Output: default@decimal
diff --git a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
index 6fde788..1607c09 100644
--- a/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
+++ b/ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out
@@ -34,9 +34,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
#### A masked pattern was here ####
11
12
-PREHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds
+PREHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
PREHOOK: type: CREATETABLE_AS_SELECT
-POSTHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as date from srcpart group by ds
+POSTHOOK: query: EXPLAIN create table srcpart_date as select ds as ds, ds as `date` from srcpart group by ds
POSTHOOK: type: CREATETABLE_AS_SELECT
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -113,7 +113,7 @@ STAGE PLANS:
hdfs directory: true
#### A masked pattern was here ####
-PREHOOK: query: create table srcpart_date stored as orc as select ds as ds, ds as date from srcpart group by ds
+PREHOOK: query: create table srcpart_date stored as orc as select ds as ds, ds as `date` from srcpart group by ds
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -122,7 +122,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Output: database:default
PREHOOK: Output: default@srcpart_date
-POSTHOOK: query: create table srcpart_date stored as orc as select ds as ds, ds as date from srcpart group by ds
+POSTHOOK: query: create table srcpart_date stored as orc as select ds as ds, ds as `date` from srcpart group by ds
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -149,7 +149,7 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
POSTHOOK: Output: database:default
POSTHOOK: Output: default@srcpart_hour
-PREHOOK: query: create table srcpart_date_hour stored as orc as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr
+PREHOOK: query: create table srcpart_date_hour stored as orc as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr
PREHOOK: type: CREATETABLE_AS_SELECT
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -158,7 +158,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Output: database:default
PREHOOK: Output: default@srcpart_date_hour
-POSTHOOK: query: create table srcpart_date_hour stored as orc as select ds as ds, ds as date, hr as hr, hr as hour from srcpart group by ds, hr
+POSTHOOK: query: create table srcpart_date_hour stored as orc as select ds as ds, ds as `date`, hr as hr, hr as hour from srcpart group by ds, hr
POSTHOOK: type: CREATETABLE_AS_SELECT
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -186,10 +186,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
POSTHOOK: Output: database:default
POSTHOOK: Output: default@srcpart_double_hour
PREHOOK: query: -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
POSTHOOK: query: -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -284,7 +284,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -293,7 +293,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -303,9 +303,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
POSTHOOK: Input: default@srcpart_date
#### A masked pattern was here ####
1000
-PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -385,7 +385,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -394,7 +394,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -419,11 +419,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
1000
PREHOOK: query: -- multiple sources, single key
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- multiple sources, single key
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -564,7 +564,7 @@ STAGE PLANS:
ListSink
PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -575,7 +575,7 @@ PREHOOK: Input: default@srcpart_date
PREHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -587,10 +587,10 @@ POSTHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
500
PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -702,7 +702,7 @@ STAGE PLANS:
ListSink
PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -713,7 +713,7 @@ PREHOOK: Input: default@srcpart_date
PREHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -736,10 +736,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
#### A masked pattern was here ####
500
PREHOOK: query: -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -847,7 +847,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -856,7 +856,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date_hour
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -866,9 +866,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
POSTHOOK: Input: default@srcpart_date_hour
#### A masked pattern was here ####
500
-PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -947,7 +947,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -956,7 +956,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date_hour
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -978,10 +978,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
#### A masked pattern was here ####
500
PREHOOK: query: -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
PREHOOK: type: QUERY
POSTHOOK: query: -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -1076,7 +1076,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -1085,7 +1085,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -1095,9 +1095,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
POSTHOOK: Input: default@srcpart_date
#### A masked pattern was here ####
0
-PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -1177,7 +1177,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -1186,7 +1186,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -1795,10 +1795,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
1000
Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
PREHOOK: query: -- parent is reduce tasks
-EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
PREHOOK: type: QUERY
POSTHOOK: query: -- parent is reduce tasks
-EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -1898,13 +1898,13 @@ STAGE PLANS:
ListSink
Warning: Shuffle Join MERGEJOIN[24][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
+PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
+POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -1926,10 +1926,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
1000
Warning: Shuffle Join MERGEJOIN[13][tables = [srcpart, srcpart_date_hour]] in Stage 'Reducer 2' is a cross product
PREHOOK: query: -- non-equi join
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
PREHOOK: type: QUERY
POSTHOOK: query: -- non-equi join
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2012,7 +2012,7 @@ STAGE PLANS:
ListSink
Warning: Shuffle Join MERGEJOIN[13][tables = [srcpart, srcpart_date_hour]] in Stage 'Reducer 2' is a cross product
-PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
+PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -2021,7 +2021,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date_hour
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
+POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr)
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -2032,10 +2032,10 @@ POSTHOOK: Input: default@srcpart_date_hour
#### A masked pattern was here ####
1500
PREHOOK: query: -- old style join syntax
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
PREHOOK: type: QUERY
POSTHOOK: query: -- old style join syntax
-EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
+EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2149,7 +2149,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
+PREHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -2158,7 +2158,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date_hour
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
+POSTHOOK: query: select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -2169,10 +2169,10 @@ POSTHOOK: Input: default@srcpart_date_hour
#### A masked pattern was here ####
500
PREHOOK: query: -- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
POSTHOOK: query: -- left join
-EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2254,9 +2254,9 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2351,10 +2351,10 @@ STAGE PLANS:
ListSink
PREHOOK: query: -- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
POSTHOOK: query: -- full outer
-EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2438,11 +2438,11 @@ STAGE PLANS:
PREHOOK: query: -- with static pruning
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- with static pruning
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2582,7 +2582,7 @@ STAGE PLANS:
ListSink
PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -2591,7 +2591,7 @@ PREHOOK: Input: default@srcpart_date
PREHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -2601,10 +2601,10 @@ POSTHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
500
PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
PREHOOK: type: QUERY
POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -2704,14 +2704,14 @@ STAGE PLANS:
ListSink
PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart_date
PREHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart.hr = 13
+where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart_date
@@ -3337,10 +3337,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
2008-04-09
2008-04-09
PREHOOK: query: -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
POSTHOOK: query: -- single column, single key
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -3430,7 +3430,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3439,7 +3439,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08'
+POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08'
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3464,11 +3464,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
1000
PREHOOK: query: -- multiple sources, single key
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- multiple sources, single key
EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -3597,7 +3597,7 @@ STAGE PLANS:
ListSink
PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3608,7 +3608,7 @@ PREHOOK: Input: default@srcpart_date
PREHOOK: Input: default@srcpart_hour
#### A masked pattern was here ####
POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr)
-where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11
+where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3631,10 +3631,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
#### A masked pattern was here ####
500
PREHOOK: query: -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- multiple columns single source
-EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -3737,7 +3737,7 @@ STAGE PLANS:
Processor Tree:
ListSink
-PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+PREHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
PREHOOK: type: QUERY
PREHOOK: Input: default@srcpart
PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3746,7 +3746,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
PREHOOK: Input: default@srcpart_date_hour
#### A masked pattern was here ####
-POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.date = '2008-04-08' and srcpart_date_hour.hour = 11
+POSTHOOK: query: select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11
POSTHOOK: type: QUERY
POSTHOOK: Input: default@srcpart
POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
@@ -3768,10 +3768,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
#### A masked pattern was here ####
500
PREHOOK: query: -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
PREHOOK: type: QUERY
POSTHOOK: query: -- empty set
-EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST'
+EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -3862,13 +3862,13 @@ STAGE PLANS:
ListSink
PREHOOK: query: -- Disabled until TEZ-1486 is fixed
--- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
-- expressions
EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
PREHOOK: type: QUERY
POSTHOOK: query: -- Disabled until TEZ-1486 is fixed
--- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = 'I DONT EXIST';
+-- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST';
-- expressions
EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11
@@ -4110,10 +4110,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
1000
Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Map 1' is a cross product
PREHOOK: query: -- parent is reduce tasks
-EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
PREHOOK: type: QUERY
POSTHOOK: query: -- parent is reduce tasks
-EXPLAIN select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08'
+EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08'
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
Stage-1 is a root stage
@@ -4210,13 +4210,13 @@ STAGE PLANS:
ListSink
Warning: Map Join MAPJOIN[24][bigTable=?]
in task 'Map 1' is a cross product -PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +PREHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as date from srcpart group by ds) s on (srcpart.ds = s.ds) where s.date = '2008-04-08' +POSTHOOK: query: select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -4237,10 +4237,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 1000 PREHOOK: query: -- left join -EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: -- left join -EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -4317,9 +4317,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart_date left join srcpart on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -4394,10 +4394,10 @@ STAGE PLANS: ListSink PREHOOK: query: -- full outer -EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY POSTHOOK: query: -- full outer -EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.date = '2008-04-08' +EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -4481,11 +4481,11 @@ STAGE PLANS: PREHOOK: query: -- with static pruning EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' 
and srcpart_hour.hour = 11 and srcpart.hr = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 PREHOOK: type: QUERY POSTHOOK: query: -- with static pruning EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -4614,7 +4614,7 @@ STAGE PLANS: ListSink PREHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -4623,7 +4623,7 @@ PREHOOK: Input: default@srcpart_date PREHOOK: Input: default@srcpart_hour #### A masked pattern was here #### POSTHOOK: query: select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 +where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -4633,10 +4633,10 @@ POSTHOOK: Input: default@srcpart_hour #### A masked pattern was here #### 500 PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) -where srcpart_date.date = '2008-04-08' and srcpart.hr = 13 +where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -4718,14 +4718,14 @@ STAGE PLANS: PREHOOK: query: -- Disabled until TEZ-1486 is fixed -- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) --- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; +-- where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; -- union + subquery EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) PREHOOK: type: QUERY POSTHOOK: query: -- Disabled until TEZ-1486 is fixed -- select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) --- where srcpart_date.date = '2008-04-08' and srcpart.hr = 13; +-- where srcpart_date.`date` = '2008-04-08' and srcpart.hr = 13; -- union + subquery EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) @@ -4945,9 +4945,9 @@ POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=11).key EXPRESSION [(s POSTHOOK: Lineage: srcpart_orc 
PARTITION(ds=2008-04-09,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=12).key EXPRESSION [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: srcpart_orc PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09') +PREHOOK: query: EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09') PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09') +POSTHOOK: query: EXPLAIN select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09') POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -5053,7 +5053,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09') +PREHOOK: query: select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09') PREHOOK: type: QUERY PREHOOK: Input: default@srcpart_date_hour PREHOOK: Input: default@srcpart_orc @@ -5062,7 +5062,7 @@ PREHOOK: Input: default@srcpart_orc@ds=2008-04-08/hr=12 PREHOOK: Input: default@srcpart_orc@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart_orc@ds=2008-04-09/hr=12 #### A masked pattern was here #### -POSTHOOK: query: select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.date = '2008-04-08' or srcpart_date_hour.date = '2008-04-09') +POSTHOOK: query: select count(*) from srcpart_orc join srcpart_date_hour on (srcpart_orc.ds = srcpart_date_hour.ds and srcpart_orc.hr = srcpart_date_hour.hr) where srcpart_date_hour.hour = 11 and (srcpart_date_hour.`date` = '2008-04-08' or srcpart_date_hour.`date` = '2008-04-09') POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart_date_hour POSTHOOK: Input: default@srcpart_orc diff --git a/ql/src/test/results/clientpositive/union_remove_1.q.out b/ql/src/test/results/clientpositive/union_remove_1.q.out index 411f63e..f30f39b 100644 --- a/ql/src/test/results/clientpositive/union_remove_1.q.out +++ b/ql/src/test/results/clientpositive/union_remove_1.q.out @@ -26,11 
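All of the dynamic partition pruning hunks above make the same mechanical change: once date is treated as a SQL2011 reserved keyword, every reference to the date column of srcpart_date (and srcpart_date_hour) has to be written in backticks. A minimal sketch of the pattern, reusing the test's own tables (a standalone illustration, not part of the patch):

    -- `date` is a reserved keyword, so the identifier must be back-quoted
    -- at every reference site; the string literal on the right is untouched
    SELECT count(*)
    FROM srcpart
    JOIN srcpart_date ON (srcpart.ds = srcpart_date.ds)
    WHERE srcpart_date.`date` = '2008-04-08';

The quoting is needed in filters, join conditions, and select-list aliases (ds as `date`) alike; only the identifier is escaped, never the literal it is compared against.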
diff --git a/ql/src/test/results/clientpositive/union_remove_1.q.out b/ql/src/test/results/clientpositive/union_remove_1.q.out
index 411f63e..f30f39b 100644
--- a/ql/src/test/results/clientpositive/union_remove_1.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_1.q.out
@@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -46,18 +46,18 @@ PREHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
PREHOOK: type: QUERY
POSTHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -155,9 +155,9 @@ STAGE PLANS:
PREHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
PREHOOK: type: QUERY
PREHOOK: Input: default@inputtbl1
@@ -165,9 +165,9 @@ PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@inputtbl1
@@ -210,11 +210,11 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: select * from outputTbl1 order by key, values
+PREHOOK: query: select * from outputTbl1 order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 order by key, values
+POSTHOOK: query: select * from outputTbl1 order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
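union_remove_1 shows the DDL side of the same change: values is also reserved, so the CREATE TABLE column, the aggregate alias that feeds it, and the ORDER BY reference all need backticks. A condensed sketch of the three spots, following the test's own tables (illustration only, not part of the patch):

    -- quote the reserved keyword in the column definition...
    CREATE TABLE outputTbl1(key string, `values` bigint) STORED AS TEXTFILE;
    -- ...in the alias that populates it...
    INSERT OVERWRITE TABLE outputTbl1
    SELECT key, count(1) AS `values` FROM inputTbl1 GROUP BY key;
    -- ...and at every later reference
    SELECT * FROM outputTbl1 ORDER BY key, `values`;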
diff --git a/ql/src/test/results/clientpositive/union_remove_10.q.out b/ql/src/test/results/clientpositive/union_remove_10.q.out
index d80bf32..e720c37 100644
--- a/ql/src/test/results/clientpositive/union_remove_10.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_10.q.out
@@ -34,11 +34,11 @@ create table inputTbl1(key string, val string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -54,12 +54,12 @@ PREHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
select * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
) a
)b
PREHOOK: type: QUERY
@@ -67,12 +67,12 @@ POSTHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
select * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
) a
)b
POSTHOOK: type: QUERY
@@ -206,12 +206,12 @@ STAGE PLANS:
PREHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
select * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
) a
)b
PREHOOK: type: QUERY
@@ -220,12 +220,12 @@ PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
select * FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
) a
)b
POSTHOOK: type: QUERY
@@ -269,11 +269,11 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: select * from outputTbl1 order by key, values
+PREHOOK: query: select * from outputTbl1 order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 order by key, values
+POSTHOOK: query: select * from outputTbl1 order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/union_remove_11.q.out b/ql/src/test/results/clientpositive/union_remove_11.q.out
index 23ab7c6..571b398 100644
--- a/ql/src/test/results/clientpositive/union_remove_11.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_11.q.out
@@ -34,11 +34,11 @@ create table inputTbl1(key string, val string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -54,12 +54,12 @@ PREHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
select * FROM (
- SELECT key, 2 values from inputTbl1
+ SELECT key, 2 `values` from inputTbl1
UNION ALL
- SELECT key, 3 as values from inputTbl1
+ SELECT key, 3 as `values` from inputTbl1
) a
)b
PREHOOK: type: QUERY
@@ -67,12 +67,12 @@ POSTHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
select * FROM (
- SELECT key, 2 values from inputTbl1
+ SELECT key, 2 `values` from inputTbl1
UNION ALL
- SELECT key, 3 as values from inputTbl1
+ SELECT key, 3 as `values` from inputTbl1
) a
)b
POSTHOOK: type: QUERY
@@ -195,12 +195,12 @@ STAGE PLANS:
PREHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
select * FROM (
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
UNION ALL
- SELECT key, 3 as values from inputTbl1
+ SELECT key, 3 as `values` from inputTbl1
) a
)b
PREHOOK: type: QUERY
@@ -209,12 +209,12 @@ PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
select * FROM (
- SELECT key, 2 as values from inputTbl1
+ SELECT key, 2 as `values` from inputTbl1
UNION ALL
- SELECT key, 3 as values from inputTbl1
+ SELECT key, 3 as `values` from inputTbl1
) a
)b
POSTHOOK: type: QUERY
@@ -258,11 +258,11 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: select * from outputTbl1 order by key, values
+PREHOOK: query: select * from outputTbl1 order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 order by key, values
+POSTHOOK: query: select * from outputTbl1 order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
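One detail worth noticing in union_remove_11 above: one branch writes the alias without the AS keyword (SELECT key, 2 `values` from inputTbl1), and the backticks are still required. The quoting belongs to the identifier itself, not to the AS syntax, so both spellings below should behave the same (illustrative sketch only):

    SELECT key, 2 AS `values` FROM inputTbl1; -- explicit AS
    SELECT key, 2 `values` FROM inputTbl1;    -- implicit alias, still quoted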
diff --git a/ql/src/test/results/clientpositive/union_remove_12.q.out b/ql/src/test/results/clientpositive/union_remove_12.q.out
index f9fd323..4d6f4a1 100644
--- a/ql/src/test/results/clientpositive/union_remove_12.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_12.q.out
@@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -52,9 +52,9 @@ PREHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
PREHOOK: type: QUERY
@@ -62,9 +62,9 @@ POSTHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
POSTHOOK: type: QUERY
@@ -192,9 +192,9 @@ STAGE PLANS:
PREHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
PREHOOK: type: QUERY
@@ -203,9 +203,9 @@ PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
POSTHOOK: type: QUERY
@@ -249,11 +249,11 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: select * from outputTbl1 order by key, values
+PREHOOK: query: select * from outputTbl1 order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 order by key, values
+POSTHOOK: query: select * from outputTbl1 order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/union_remove_13.q.out b/ql/src/test/results/clientpositive/union_remove_13.q.out
index b8913e2..a558967 100644
--- a/ql/src/test/results/clientpositive/union_remove_13.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_13.q.out
@@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -52,9 +52,9 @@ PREHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
-select key, count(1) as values from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1 group by key
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
PREHOOK: type: QUERY
@@ -62,9 +62,9 @@ POSTHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
-select key, count(1) as values from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1 group by key
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
POSTHOOK: type: QUERY
@@ -215,9 +215,9 @@ STAGE PLANS:
PREHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
-select key, count(1) as values from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1 group by key
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
PREHOOK: type: QUERY
@@ -226,9 +226,9 @@ PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
-select key, count(1) as values from inputTbl1 group by key
+select key, count(1) as `values` from inputTbl1 group by key
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
POSTHOOK: type: QUERY
@@ -272,11 +272,11 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: select * from outputTbl1 order by key, values
+PREHOOK: query: select * from outputTbl1 order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 order by key, values
+POSTHOOK: query: select * from outputTbl1 order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/union_remove_14.q.out b/ql/src/test/results/clientpositive/union_remove_14.q.out
index 28e4bb6..1502749 100644
--- a/ql/src/test/results/clientpositive/union_remove_14.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_14.q.out
@@ -34,11 +34,11 @@ create table inputTbl1(key string, val string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -54,9 +54,9 @@ PREHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
PREHOOK: type: QUERY
@@ -64,9 +64,9 @@ POSTHOOK: query: explain
insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
POSTHOOK: type: QUERY
@@ -194,9 +194,9 @@ STAGE PLANS:
PREHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
PREHOOK: type: QUERY
@@ -205,9 +205,9 @@ PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1
SELECT *
FROM (
-select key, 1 as values from inputTbl1
+select key, 1 as `values` from inputTbl1
union all
-select a.key as key, b.val as values
+select a.key as key, b.val as `values`
FROM inputTbl1 a join inputTbl1 b on a.key=b.key
)c
POSTHOOK: type: QUERY
@@ -251,11 +251,11 @@ Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
-PREHOOK: query: select * from outputTbl1 order by key, values
+PREHOOK: query: select * from outputTbl1 order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 order by key, values
+POSTHOOK: query: select * from outputTbl1 order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/union_remove_15.q.out b/ql/src/test/results/clientpositive/union_remove_15.q.out
index faf3e58..2b3c109 100644
--- a/ql/src/test/results/clientpositive/union_remove_15.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_15.q.out
@@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -52,18 +52,18 @@ PREHOOK: query: explain
insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
) a
PREHOOK: type: QUERY
POSTHOOK: query: explain
insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
) a
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -171,9 +171,9 @@ STAGE PLANS:
PREHOOK: query: insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
) a
PREHOOK: type: QUERY
PREHOOK: Input: default@inputtbl1
@@ -181,9 +181,9 @@ PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@inputtbl1
@@ -237,12 +237,12 @@ POSTHOOK: type: SHOWPARTITIONS
POSTHOOK: Input: default@outputtbl1
ds=1
ds=2
-PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values
+PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
PREHOOK: Input: default@outputtbl1@ds=1
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values
+POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
POSTHOOK: Input: default@outputtbl1@ds=1
@@ -252,12 +252,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1
3 1 1
7 1 1
8 2 1
-PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values
+PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
PREHOOK: Input: default@outputtbl1@ds=2
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values
+POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
POSTHOOK: Input: default@outputtbl1@ds=2
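union_remove_15 above (and union_remove_16, which follows) runs the same rename through dynamic-partition inserts: the quoted `values` column travels through the select list, while the partition column ds is not reserved and needs no escaping. A condensed sketch of the statement shape (illustration only, assuming the test tables exist):

    INSERT OVERWRITE TABLE outputTbl1 PARTITION (ds)
    SELECT key, count(1) AS `values`, '1' AS ds FROM inputTbl1 GROUP BY key;
    SELECT * FROM outputTbl1 WHERE ds = '1' ORDER BY key, `values`;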
diff --git a/ql/src/test/results/clientpositive/union_remove_16.q.out b/ql/src/test/results/clientpositive/union_remove_16.q.out
index 8656c11..ef32e1c 100644
--- a/ql/src/test/results/clientpositive/union_remove_16.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_16.q.out
@@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -50,18 +50,18 @@ PREHOOK: query: explain
insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
) a
PREHOOK: type: QUERY
POSTHOOK: query: explain
insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
) a
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -203,9 +203,9 @@ STAGE PLANS:
PREHOOK: query: insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
) a
PREHOOK: type: QUERY
PREHOOK: Input: default@inputtbl1
@@ -213,9 +213,9 @@ PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, '1' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '1' as ds from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values, '2' as ds from inputTbl1 group by key
+ SELECT key, count(1) as `values`, '2' as ds from inputTbl1 group by key
) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@inputtbl1
@@ -269,12 +269,12 @@ POSTHOOK: type: SHOWPARTITIONS
POSTHOOK: Input: default@outputtbl1
ds=1
ds=2
-PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values
+PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
PREHOOK: Input: default@outputtbl1@ds=1
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values
+POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
POSTHOOK: Input: default@outputtbl1@ds=1
@@ -284,12 +284,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1
3 1 1
7 1 1
8 2 1
-PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values
+PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
PREHOOK: Input: default@outputtbl1@ds=2
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values
+POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
POSTHOOK: Input: default@outputtbl1@ds=2
diff --git a/ql/src/test/results/clientpositive/union_remove_17.q.out b/ql/src/test/results/clientpositive/union_remove_17.q.out
index 386b023..56b22d9 100644
--- a/ql/src/test/results/clientpositive/union_remove_17.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_17.q.out
@@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as rcfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as rcfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -46,18 +46,18 @@ PREHOOK: query: explain
insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, 1 as values, '1' as ds from inputTbl1
+ SELECT key, 1 as `values`, '1' as ds from inputTbl1
UNION ALL
- SELECT key, 2 as values, '2' as ds from inputTbl1
+ SELECT key, 2 as `values`, '2' as ds from inputTbl1
) a
PREHOOK: type: QUERY
POSTHOOK: query: explain
insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, 1 as values, '1' as ds from inputTbl1
+ SELECT key, 1 as `values`, '1' as ds from inputTbl1
UNION ALL
- SELECT key, 2 as values, '2' as ds from inputTbl1
+ SELECT key, 2 as `values`, '2' as ds from inputTbl1
) a
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -126,9 +126,9 @@ STAGE PLANS:
PREHOOK: query: insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, 1 as values, '1' as ds from inputTbl1
+ SELECT key, 1 as `values`, '1' as ds from inputTbl1
UNION ALL
- SELECT key, 2 as values, '2' as ds from inputTbl1
+ SELECT key, 2 as `values`, '2' as ds from inputTbl1
) a
PREHOOK: type: QUERY
PREHOOK: Input: default@inputtbl1
@@ -136,9 +136,9 @@ PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, 1 as values, '1' as ds from inputTbl1
+ SELECT key, 1 as `values`, '1' as ds from inputTbl1
UNION ALL
- SELECT key, 2 as values, '2' as ds from inputTbl1
+ SELECT key, 2 as `values`, '2' as ds from inputTbl1
) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@inputtbl1
@@ -192,12 +192,12 @@ POSTHOOK: type: SHOWPARTITIONS
POSTHOOK: Input: default@outputtbl1
ds=1
ds=2
-PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, values
+PREHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
PREHOOK: Input: default@outputtbl1@ds=1
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, values
+POSTHOOK: query: select * from outputTbl1 where ds = '1' order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
POSTHOOK: Input: default@outputtbl1@ds=1
@@ -208,12 +208,12 @@ POSTHOOK: Input: default@outputtbl1@ds=1
7 1 1
8 1 1
8 1 1
-PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, values
+PREHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
PREHOOK: Input: default@outputtbl1@ds=2
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, values
+POSTHOOK: query: select * from outputTbl1 where ds = '2' order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
POSTHOOK: Input: default@outputtbl1@ds=2
diff --git a/ql/src/test/results/clientpositive/union_remove_18.q.out b/ql/src/test/results/clientpositive/union_remove_18.q.out
index 0ce6e81..80971dd 100644
--- a/ql/src/test/results/clientpositive/union_remove_18.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_18.q.out
@@ -30,11 +30,11 @@ create table inputTbl1(key string, ds string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -50,18 +50,18 @@ PREHOOK: query: explain
insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
UNION ALL
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
) a
PREHOOK: type: QUERY
POSTHOOK: query: explain
insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
UNION ALL
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
) a
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -169,9 +169,9 @@ STAGE PLANS:
PREHOOK: query: insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
UNION ALL
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
) a
PREHOOK: type: QUERY
PREHOOK: Input: default@inputtbl1
@@ -179,9 +179,9 @@ PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1 partition (ds)
SELECT *
FROM (
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
UNION ALL
- SELECT key, count(1) as values, ds from inputTbl1 group by key, ds
+ SELECT key, count(1) as `values`, ds from inputTbl1 group by key, ds
) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@inputtbl1
@@ -251,31 +251,31 @@ ds=13
ds=17
ds=18
ds=28
-PREHOOK: query: select * from outputTbl1 where ds = '11' order by key, values
+PREHOOK: query: select * from outputTbl1 where ds = '11' order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
PREHOOK: Input: default@outputtbl1@ds=11
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '11' order by key, values
+POSTHOOK: query: select * from outputTbl1 where ds = '11' order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
POSTHOOK: Input: default@outputtbl1@ds=11
#### A masked pattern was here ####
1 1 11
1 1 11
-PREHOOK: query: select * from outputTbl1 where ds = '18' order by key, values
+PREHOOK: query: select * from outputTbl1 where ds = '18' order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
PREHOOK: Input: default@outputtbl1@ds=18
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds = '18' order by key, values
+POSTHOOK: query: select * from outputTbl1 where ds = '18' order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
POSTHOOK: Input: default@outputtbl1@ds=18
#### A masked pattern was here ####
8 1 18
8 1 18
-PREHOOK: query: select * from outputTbl1 where ds is not null order by key, values, ds
+PREHOOK: query: select * from outputTbl1 where ds is not null order by key, `values`, ds
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
PREHOOK: Input: default@outputtbl1@ds=11
@@ -285,7 +285,7 @@ PREHOOK: Input: default@outputtbl1@ds=17
PREHOOK: Input: default@outputtbl1@ds=18
PREHOOK: Input: default@outputtbl1@ds=28
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 where ds is not null order by key, values, ds
+POSTHOOK: query: select * from outputTbl1 where ds is not null order by key, `values`, ds
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
POSTHOOK: Input: default@outputtbl1@ds=11
diff --git a/ql/src/test/results/clientpositive/union_remove_19.q.out b/ql/src/test/results/clientpositive/union_remove_19.q.out
index b6d557b..030c5ce 100644
--- a/ql/src/test/results/clientpositive/union_remove_19.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_19.q.out
@@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile
+PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile
PREHOOK: type: CREATETABLE
PREHOOK: Output: database:default
PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile
+POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile
POSTHOOK: type: CREATETABLE
POSTHOOK: Output: database:default
POSTHOOK: Output: default@outputTbl1
@@ -48,20 +48,20 @@ POSTHOOK: type: LOAD
POSTHOOK: Output: default@inputtbl1
PREHOOK: query: explain
insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
PREHOOK: type: QUERY
POSTHOOK: query: explain
insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -157,21 +157,21 @@ STAGE PLANS:
name: default.outputtbl1
PREHOOK: query: insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
PREHOOK: type: QUERY
PREHOOK: Input: default@inputtbl1
PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
POSTHOOK: type: QUERY
POSTHOOK: Input: default@inputtbl1
@@ -235,21 +235,21 @@ POSTHOOK: Input: default@outputtbl1
PREHOOK: query: -- filter should be fine
explain insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a where a.key = 7
PREHOOK: type: QUERY
POSTHOOK: query: -- filter should be fine
explain insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a where a.key = 7
POSTHOOK: type: QUERY
STAGE DEPENDENCIES:
@@ -351,21 +351,21 @@ STAGE PLANS:
name: default.outputtbl1
PREHOOK: query: insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a where a.key = 7
PREHOOK: type: QUERY
PREHOOK: Input: default@inputtbl1
PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1
-SELECT a.key, a.values
+SELECT a.key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a where a.key = 7
POSTHOOK: type: QUERY
POSTHOOK: Input: default@inputtbl1
@@ -385,26 +385,26 @@ POSTHOOK: Input: default@outputtbl1
PREHOOK: query: -- filters and sub-queries should be fine
explain insert overwrite table outputTbl1
-select key, values from
+select key, `values` from
(
-SELECT a.key + a.key as key, a.values
+SELECT a.key + a.key as key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
) b where b.key >= 7
PREHOOK: type: QUERY
POSTHOOK: query: -- filters and sub-queries should be fine
explain insert overwrite table outputTbl1
-select key, values from
+select key, `values` from
(
-SELECT a.key + a.key as key, a.values
+SELECT a.key + a.key as key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
) b where b.key >= 7
POSTHOOK: type: QUERY
@@ -515,26 +515,26 @@ STAGE PLANS:
name: default.outputtbl1
PREHOOK: query: insert overwrite table outputTbl1
-select key, values from
+select key, `values` from
(
-SELECT a.key + a.key as key, a.values
+SELECT a.key + a.key as key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
) b where b.key >= 7
PREHOOK: type: QUERY
PREHOOK: Input: default@inputtbl1
PREHOOK: Output: default@outputtbl1
POSTHOOK: query: insert overwrite table outputTbl1
-select key, values from
+select key, `values` from
(
-SELECT a.key + a.key as key, a.values
+SELECT a.key + a.key as key, a.`values`
FROM (
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
UNION ALL
- SELECT key, count(1) as values from inputTbl1 group by key
+ SELECT key, count(1) as `values` from inputTbl1 group by key
) a
) b where b.key >= 7
POSTHOOK: type: QUERY
@@ -542,11 +542,11 @@ POSTHOOK: Input: default@inputtbl1
POSTHOOK: Output: default@outputtbl1
POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ]
-PREHOOK: query: select * from outputTbl1 order by key, values
+PREHOOK: query: select * from outputTbl1 order by key, `values`
PREHOOK: type: QUERY
PREHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 order by key, values
+POSTHOOK: query: select * from outputTbl1 order by key, `values`
POSTHOOK: type: QUERY
POSTHOOK: Input: default@outputtbl1
#### A masked pattern was here ####
overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -221,11 +221,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_20.q.out b/ql/src/test/results/clientpositive/union_remove_20.q.out index b2819ce..fc8464d 100644 --- a/ql/src/test/results/clientpositive/union_remove_20.q.out +++ b/ql/src/test/results/clientpositive/union_remove_20.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(values bigint, key string) stored as textfile +PREHOOK: query: create table outputTbl1(`values` bigint, key string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(values bigint, key string) stored as textfile +POSTHOOK: query: create table outputTbl1(`values` bigint, key string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,20 +46,20 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -163,21 +163,21 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.values, a.key +SELECT a.`values`, a.key FROM ( - SELECT key, count(1) as values from inputTbl1 
group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -220,11 +220,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_21.q.out b/ql/src/test/results/clientpositive/union_remove_21.q.out index ac20851..842e67f 100644 --- a/ql/src/test/results/clientpositive/union_remove_21.q.out +++ b/ql/src/test/results/clientpositive/union_remove_21.q.out @@ -48,18 +48,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -151,9 +151,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -161,9 +161,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT a.key FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_22.q.out b/ql/src/test/results/clientpositive/union_remove_22.q.out index a8257d0..c9235c1 100644 --- a/ql/src/test/results/clientpositive/union_remove_22.q.out +++ b/ql/src/test/results/clientpositive/union_remove_22.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint, values2 bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint, values2 bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key 
string, values bigint, values2 bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint, values2 bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -44,20 +44,20 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, a.values, a.values +SELECT a.key, a.`values`, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, a.values, a.values +SELECT a.key, a.`values`, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -161,21 +161,21 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values, a.values +SELECT a.key, a.`values`, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, a.values, a.values +SELECT a.key, a.`values`, a.`values` FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -240,20 +240,20 @@ POSTHOOK: Input: default@outputtbl1 8 2 2 PREHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, concat(a.values, a.values), concat(a.values, a.values) +SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 -SELECT a.key, concat(a.values, a.values), concat(a.values, a.values) +SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -357,21 +357,21 @@ STAGE PLANS: name: default.outputtbl1 PREHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, concat(a.values, a.values), concat(a.values, a.values) +SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) FROM ( - SELECT key, 
count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 -SELECT a.key, concat(a.values, a.values), concat(a.values, a.values) +SELECT a.key, concat(a.`values`, a.`values`), concat(a.`values`, a.`values`) FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -379,11 +379,11 @@ POSTHOOK: Output: default@outputtbl1 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl1.values2 EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_23.q.out b/ql/src/test/results/clientpositive/union_remove_23.q.out index a2ed7f3..96e7d3d 100644 --- a/ql/src/test/results/clientpositive/union_remove_23.q.out +++ b/ql/src/test/results/clientpositive/union_remove_23.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,20 +48,20 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from + SELECT key, count(1) as `values` from (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) subq2 PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from + SELECT key, count(1) as `values` from (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as 
`values` from inputTbl1 group by key ) subq2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -195,10 +195,10 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from + SELECT key, count(1) as `values` from (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) subq2 PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -206,10 +206,10 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from + SELECT key, count(1) as `values` from (SELECT a.key, b.val from inputTbl1 a join inputTbl1 b on a.key=b.key) subq group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) subq2 POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -252,11 +252,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_24.q.out b/ql/src/test/results/clientpositive/union_remove_24.q.out index d0ac662..d9e86bf 100644 --- a/ql/src/test/results/clientpositive/union_remove_24.q.out +++ b/ql/src/test/results/clientpositive/union_remove_24.q.out @@ -24,11 +24,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key double, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key double, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key double, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key double, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -44,18 +44,18 @@ PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ 
-161,9 +161,9 @@ STAGE PLANS: PREHOOK: query: INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -171,9 +171,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( - SELECT CAST(key AS DOUBLE) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS DOUBLE) AS key, count(1) as `values` FROM inputTbl1 group by key UNION ALL - SELECT CAST(key AS BIGINT) AS key, count(1) as values FROM inputTbl1 group by key + SELECT CAST(key AS BIGINT) AS key, count(1) as `values` FROM inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -216,11 +216,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_25.q.out b/ql/src/test/results/clientpositive/union_remove_25.q.out index 8c93ead..0435ad5 100644 --- a/ql/src/test/results/clientpositive/union_remove_25.q.out +++ b/ql/src/test/results/clientpositive/union_remove_25.q.out @@ -26,27 +26,27 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, values bigint) partitioned by (ds string) stored as textfile +PREHOOK: query: create table outputTbl2(key string, `values` bigint) partitioned by (ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, values bigint) partitioned by (ds string) stored as textfile +POSTHOOK: query: create table outputTbl2(key string, `values` bigint) partitioned by (ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: create table outputTbl3(key string, values bigint) partitioned by (ds string,hr string) stored as textfile +PREHOOK: query: create table outputTbl3(key string, 
`values` bigint) partitioned by (ds string,hr string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl3 -POSTHOOK: query: create table outputTbl3(key string, values bigint) partitioned by (ds string,hr string) stored as textfile +POSTHOOK: query: create table outputTbl3(key string, `values` bigint) partitioned by (ds string,hr string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl3 @@ -62,18 +62,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -173,9 +173,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -183,9 +183,9 @@ PREHOOK: Output: default@outputtbl1@ds=2004 POSTHOOK: query: insert overwrite table outputTbl1 partition(ds='2004') SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -233,12 +233,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 PREHOOK: Input: default@outputtbl1@ds=2004 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 POSTHOOK: Input: default@outputtbl1@ds=2004 diff --git a/ql/src/test/results/clientpositive/union_remove_3.q.out b/ql/src/test/results/clientpositive/union_remove_3.q.out index e210461..0de46d0 100644 --- a/ql/src/test/results/clientpositive/union_remove_3.q.out +++ b/ql/src/test/results/clientpositive/union_remove_3.q.out @@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: 
default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -48,22 +48,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -151,11 +151,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -163,11 +163,11 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -210,11 +210,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_4.q.out b/ql/src/test/results/clientpositive/union_remove_4.q.out index 2291a43..b83a0a8 100644 --- a/ql/src/test/results/clientpositive/union_remove_4.q.out +++ b/ql/src/test/results/clientpositive/union_remove_4.q.out @@ -26,11 +26,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -46,18 +46,18 @@ 
PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -199,9 +199,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -209,9 +209,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -254,11 +254,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_5.q.out b/ql/src/test/results/clientpositive/union_remove_5.q.out index 26179f5..81e64b4 100644 --- a/ql/src/test/results/clientpositive/union_remove_5.q.out +++ b/ql/src/test/results/clientpositive/union_remove_5.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,22 +50,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from 
inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -208,11 +208,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -220,11 +220,11 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -267,11 +267,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_6.q.out b/ql/src/test/results/clientpositive/union_remove_6.q.out index ff23e2d..215e90e 100644 --- a/ql/src/test/results/clientpositive/union_remove_6.q.out +++ b/ql/src/test/results/clientpositive/union_remove_6.q.out @@ -20,19 +20,19 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl2(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl2(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 @@ 
-46,18 +46,18 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -205,9 +205,9 @@ STAGE PLANS: serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe PREHOOK: query: FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -216,9 +216,9 @@ PREHOOK: Input: default@inputtbl1 PREHOOK: Output: default@outputtbl1 PREHOOK: Output: default@outputtbl2 POSTHOOK: query: FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a insert overwrite table outputTbl1 select * insert overwrite table outputTbl2 select * @@ -230,11 +230,11 @@ POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(n POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: outputtbl2.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### @@ -248,11 +248,11 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: select * from outputTbl2 order by key, values +PREHOOK: query: select * from outputTbl2 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl2 order by key, values +POSTHOOK: query: select * from outputTbl2 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out index 98c0df1..1b93994 100644 --- a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out +++ 
b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out @@ -20,19 +20,19 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +PREHOOK: query: create table outputTbl2(key string, `values` bigint) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl2 -POSTHOOK: query: create table outputTbl2(key string, values bigint) stored as textfile +POSTHOOK: query: create table outputTbl2(key string, `values` bigint) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 @@ -47,9 +47,9 @@ POSTHOOK: Output: default@inputtbl1 PREHOOK: query: explain FROM ( select * from( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key )subq ) a insert overwrite table outputTbl1 select * @@ -58,9 +58,9 @@ PREHOOK: type: QUERY POSTHOOK: query: explain FROM ( select * from( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key )subq ) a insert overwrite table outputTbl1 select * @@ -210,9 +210,9 @@ STAGE PLANS: PREHOOK: query: FROM ( select * from( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key )subq ) a insert overwrite table outputTbl1 select * @@ -223,9 +223,9 @@ PREHOOK: Output: default@outputtbl1 PREHOOK: Output: default@outputtbl2 POSTHOOK: query: FROM ( select * from( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key )subq ) a insert overwrite table outputTbl1 select * @@ -238,11 +238,11 @@ POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(n POSTHOOK: Lineage: outputtbl1.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] POSTHOOK: Lineage: outputtbl2.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: outputtbl2.values EXPRESSION [(inputtbl1)inputtbl1.null, (inputtbl1)inputtbl1.null, ] -PREHOOK: query: select * from outputTbl1 order by key, values 
+PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### @@ -256,11 +256,11 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: select * from outputTbl2 order by key, values +PREHOOK: query: select * from outputTbl2 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl2 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl2 order by key, values +POSTHOOK: query: select * from outputTbl2 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_7.q.out b/ql/src/test/results/clientpositive/union_remove_7.q.out index f0e59cb..4feb092 100644 --- a/ql/src/test/results/clientpositive/union_remove_7.q.out +++ b/ql/src/test/results/clientpositive/union_remove_7.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,18 +50,18 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -159,9 +159,9 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -169,9 +169,9 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -214,11 +214,11 @@ Bucket 
Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_8.q.out b/ql/src/test/results/clientpositive/union_remove_8.q.out index 2cba717..5d22aa5 100644 --- a/ql/src/test/results/clientpositive/union_remove_8.q.out +++ b/ql/src/test/results/clientpositive/union_remove_8.q.out @@ -32,11 +32,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -52,22 +52,22 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a PREHOOK: type: QUERY POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -166,11 +166,11 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a PREHOOK: type: QUERY PREHOOK: Input: default@inputtbl1 @@ -178,11 +178,11 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, count(1) as values from inputTbl1 group by key + SELECT key, count(1) as `values` from inputTbl1 group by key UNION ALL - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@inputtbl1 @@ -225,11 +225,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * 
from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_remove_9.q.out b/ql/src/test/results/clientpositive/union_remove_9.q.out index 75925ac..7380d12 100644 --- a/ql/src/test/results/clientpositive/union_remove_9.q.out +++ b/ql/src/test/results/clientpositive/union_remove_9.q.out @@ -30,11 +30,11 @@ create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 -PREHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +PREHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@outputTbl1 -POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as rcfile +POSTHOOK: query: create table outputTbl1(key string, `values` bigint) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 @@ -50,12 +50,12 @@ PREHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -63,12 +63,12 @@ POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -209,12 +209,12 @@ STAGE PLANS: PREHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b PREHOOK: type: QUERY @@ -223,12 +223,12 @@ PREHOOK: Output: default@outputtbl1 POSTHOOK: query: insert overwrite table outputTbl1 SELECT * FROM ( -select key, count(1) as values from inputTbl1 group by key +select key, count(1) as `values` from inputTbl1 group by key union all select * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as `values` from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as `values` from inputTbl1 ) a )b POSTHOOK: type: QUERY @@ -272,11 +272,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: select * from outputTbl1 order by key, values +PREHOOK: query: select * from outputTbl1 order by key, `values` PREHOOK: type: QUERY PREHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -POSTHOOK: query: select * from 
outputTbl1 order by key, values +POSTHOOK: query: select * from outputTbl1 order by key, `values` POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out b/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out index 1fb0e30..6e8338f 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_10_0.q.out @@ -2,9 +2,9 @@ PREHOOK: query: DROP TABLE IF EXISTS decimal_txt PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE IF EXISTS decimal_txt POSTHOOK: type: DROPTABLE -PREHOOK: query: DROP TABLE IF EXISTS decimal +PREHOOK: query: DROP TABLE IF EXISTS `decimal` PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS decimal +POSTHOOK: query: DROP TABLE IF EXISTS `decimal` POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE decimal_txt (dec decimal) PREHOOK: type: CREATETABLE @@ -22,21 +22,21 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVER POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@decimal_txt -PREHOOK: query: CREATE TABLE DECIMAL STORED AS ORC AS SELECT * FROM decimal_txt +PREHOOK: query: CREATE TABLE `DECIMAL` STORED AS ORC AS SELECT * FROM decimal_txt PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@decimal_txt PREHOOK: Output: database:default PREHOOK: Output: default@DECIMAL -POSTHOOK: query: CREATE TABLE DECIMAL STORED AS ORC AS SELECT * FROM decimal_txt +POSTHOOK: query: CREATE TABLE `DECIMAL` STORED AS ORC AS SELECT * FROM decimal_txt POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@decimal_txt POSTHOOK: Output: database:default POSTHOOK: Output: default@DECIMAL PREHOOK: query: EXPLAIN -SELECT dec FROM DECIMAL order by dec +SELECT dec FROM `DECIMAL` order by dec PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN -SELECT dec FROM DECIMAL order by dec +SELECT dec FROM `DECIMAL` order by dec POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -77,11 +77,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT dec FROM DECIMAL order by dec +PREHOOK: query: SELECT dec FROM `DECIMAL` order by dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal #### A masked pattern was here #### -POSTHOOK: query: SELECT dec FROM DECIMAL order by dec +POSTHOOK: query: SELECT dec FROM `DECIMAL` order by dec POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal #### A masked pattern was here #### @@ -95,11 +95,11 @@ POSTHOOK: query: DROP TABLE DECIMAL_txt POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@decimal_txt POSTHOOK: Output: default@decimal_txt -PREHOOK: query: DROP TABLE DECIMAL +PREHOOK: query: DROP TABLE `DECIMAL` PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal PREHOOK: Output: default@decimal -POSTHOOK: query: DROP TABLE DECIMAL +POSTHOOK: query: DROP TABLE `DECIMAL` POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@decimal POSTHOOK: Output: default@decimal
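
-- Editorial note, not part of the patch: every hunk above applies one rule --
-- once a word such as values or decimal is treated as a reserved keyword, it
-- must be backtick-quoted at every site where it is used as an identifier.
-- The HiveQL below is a minimal illustrative sketch assembled from the test
-- queries in the golden files above (it assumes inputTbl1 and decimal_txt are
-- already populated, as the LOAD steps in those files do).

-- Reserved word as a column name: quote it in the DDL, in the alias that
-- defines it, at every column reference, and in ORDER BY.
create table inputTbl1(key string, val string) stored as textfile;
create table outputTbl1(key string, `values` bigint) stored as textfile;

insert overwrite table outputTbl1
SELECT a.key, a.`values`
FROM (
  SELECT key, count(1) as `values` from inputTbl1 group by key
  UNION ALL
  SELECT key, count(1) as `values` from inputTbl1 group by key
) a;

select * from outputTbl1 order by key, `values`;

-- Reserved word as a table name: quote it in CREATE, SELECT, and DROP alike.
CREATE TABLE `DECIMAL` STORED AS ORC AS SELECT * FROM decimal_txt;
SELECT dec FROM `DECIMAL` order by dec;
DROP TABLE `DECIMAL`;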