diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 062e520..c5ea780 100644
--- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1945,7 +1945,9 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
         " none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" +
         " column: implies column names can contain any character."
     ),
-
+    HIVE_SUPPORT_SQL11_KEYWORDS("hive.support.sql11.keywords", true,
+        "Whether the SQL2011 reserved keywords (currently ARRAY, USER, TIMESTAMP, DATE, ALL and INT) are treated as keywords\n" +
+        "and therefore cannot be used as identifiers. Defaults to true; set to false for backward compatibility."),
    // role names are case-insensitive
    USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "", false,
        "Comma separated list of users who are in admin role for bootstrapping.\n" +
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
index b72ee5d..957c094 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g
@@ -35,6 +35,9 @@ k=3;
       RecognitionException e) {
     gParent.errors.add(new ParseError(gParent, e, tokenNames));
   }
+  protected boolean useSQL11KeywordsForIdentifier() {
+    return gParent.useSQL11KeywordsForIdentifier();
+  }
 }

 @rulecatch {
@@ -177,7 +180,12 @@ tableSample
 tableSource
 @init { gParent.pushMsg("table source", state); }
 @after { gParent.popMsg(state); }
-    : tabname=tableName (props=tableProperties)? (ts=tableSample)? (KW_AS? alias=Identifier)?
+    : tabname=tableName
+    ((tableProperties) => props=tableProperties)?
+    ((tableSample) => ts=tableSample)?
+    ((KW_AS) => (KW_AS alias=Identifier)
+    |
+    (Identifier) => (alias=Identifier))?
     -> ^(TOK_TABREF $tabname $props? $ts? $alias?)
     ;

@@ -232,11 +240,11 @@ partitionedTableFunction
 @init { gParent.pushMsg("ptf clause", state); }
 @after { gParent.popMsg(state); }
    :
-   name=Identifier
-   LPAREN KW_ON ptfsrc=partitionTableFunctionSource partitioningSpec?
-     ((Identifier LPAREN expression RPAREN ) => Identifier LPAREN expression RPAREN ( COMMA Identifier LPAREN expression RPAREN)*)?
-   RPAREN alias=Identifier?
-   -> ^(TOK_PTBLFUNCTION $name $alias? partitionTableFunctionSource partitioningSpec? expression*)
+   name=Identifier LPAREN KW_ON
+   ((partitionTableFunctionSource) => (ptfsrc=partitionTableFunctionSource spec=partitioningSpec?))
+   ((Identifier LPAREN expression RPAREN ) => Identifier LPAREN expression RPAREN ( COMMA Identifier LPAREN expression RPAREN)*)?
+   ((RPAREN) => (RPAREN)) ((Identifier) => alias=Identifier)?
+   -> ^(TOK_PTBLFUNCTION $name $alias? $ptfsrc $spec?
+   expression*)
    ;

//----------------------- Rules for parsing whereClause -----------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 20c73cd..1afa26f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -42,7 +42,6 @@ KW_TRUE : 'TRUE';
 KW_FALSE : 'FALSE';
 KW_ALL : 'ALL';
 KW_NONE: 'NONE';
-KW_DEFAULT : 'DEFAULT';
 KW_AND : 'AND';
 KW_OR : 'OR';
 KW_NOT : 'NOT' | '!';
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 149b788..184fa9d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -353,6 +353,8 @@ package org.apache.hadoop.hive.ql.parse;
 import java.util.Collection;
 import java.util.HashMap;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
 }

@@ -369,7 +371,6 @@ import java.util.HashMap;
     xlateMap.put("KW_FALSE", "FALSE");
     xlateMap.put("KW_ALL", "ALL");
     xlateMap.put("KW_NONE", "NONE");
-    xlateMap.put("KW_DEFAULT", "DEFAULT");
     xlateMap.put("KW_AND", "AND");
     xlateMap.put("KW_OR", "OR");
     xlateMap.put("KW_NOT", "NOT");
@@ -619,6 +620,13 @@ import java.util.HashMap;
   private CommonTree throwSetOpException() throws RecognitionException {
     throw new FailedPredicateException(input, "orderByClause clusterByClause distributeByClause sortByClause limitClause can only be applied to the whole union.", "");
   }
+  private Configuration hiveConf;
+  public void setHiveConf(Configuration hiveConf) {
+    this.hiveConf = hiveConf;
+  }
+  protected boolean useSQL11KeywordsForIdentifier() {
+    return !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_SUPPORT_SQL11_KEYWORDS);
+  }
 }

 @rulecatch {
@@ -954,7 +962,6 @@ alterTableStatementSuffix
 @init { pushMsg("alter table statement", state); }
 @after { popMsg(state); }
     : alterStatementSuffixRename[true]
-    | alterStatementSuffixUpdateStatsCol
     | alterStatementSuffixDropPartitions[true]
     | alterStatementSuffixAddPartitions[true]
     | alterStatementSuffixTouch
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
index d37f49f..3aa9c3d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g
@@ -35,6 +35,9 @@ k=3;
       RecognitionException e) {
     gParent.errors.add(new ParseError(gParent, e, tokenNames));
   }
+  protected boolean useSQL11KeywordsForIdentifier() {
+    return gParent.useSQL11KeywordsForIdentifier();
+  }
 }

 @rulecatch {
@@ -51,40 +54,41 @@ groupByClause
 @after { gParent.popMsg(state); }
    :
    KW_GROUP KW_BY
-   groupByExpression
-   ( COMMA groupByExpression )*
+   expression
+   ( COMMA expression)*
    ((rollup=KW_WITH KW_ROLLUP) | (cube=KW_WITH KW_CUBE)) ?
    (sets=KW_GROUPING KW_SETS LPAREN groupingSetExpression ( COMMA groupingSetExpression)* RPAREN ) ?
-   -> {rollup != null}? ^(TOK_ROLLUP_GROUPBY groupByExpression+)
-   -> {cube != null}? ^(TOK_CUBE_GROUPBY groupByExpression+)
-   -> {sets != null}? ^(TOK_GROUPING_SETS groupByExpression+ groupingSetExpression+)
-   -> ^(TOK_GROUPBY groupByExpression+)
+   -> {rollup != null}? ^(TOK_ROLLUP_GROUPBY expression+)
+   -> {cube != null}? ^(TOK_CUBE_GROUPBY expression+)
+   -> {sets != null}? ^(TOK_GROUPING_SETS expression+ groupingSetExpression+)
+   -> ^(TOK_GROUPBY expression+)
    ;

 groupingSetExpression
 @init {gParent.pushMsg("grouping set expression", state); }
 @after {gParent.popMsg(state); }
    :
-   groupByExpression
-   -> ^(TOK_GROUPING_SETS_EXPRESSION groupByExpression)
+   (LPAREN) => groupingSetExpressionMultiple
    |
+   groupingExpressionSingle
+   ;
+
+groupingSetExpressionMultiple
+@init {gParent.pushMsg("grouping set part expression", state); }
+@after {gParent.popMsg(state); }
+   :
    LPAREN
-   groupByExpression (COMMA groupByExpression)*
-   RPAREN
-   -> ^(TOK_GROUPING_SETS_EXPRESSION groupByExpression+)
-   |
-   LPAREN
+   expression? (COMMA expression)*
    RPAREN
-   -> ^(TOK_GROUPING_SETS_EXPRESSION)
+   -> ^(TOK_GROUPING_SETS_EXPRESSION expression*)
    ;
-
-groupByExpression
-@init { gParent.pushMsg("group by expression", state); }
+groupingExpressionSingle
+@init { gParent.pushMsg("groupingExpression expression", state); }
 @after { gParent.popMsg(state); }
    :
-   expression
+   expression -> ^(TOK_GROUPING_SETS_EXPRESSION expression)
    ;

 havingClause
@@ -101,6 +105,26 @@ havingCondition
    expression
    ;

+expressionsInParenthese
+   :
+   LPAREN expression (COMMA expression)* RPAREN -> expression+
+   ;
+
+expressionsNotInParenthese
+   :
+   expression (COMMA expression)* -> expression+
+   ;
+
+columnRefOrderInParenthese
+   :
+   LPAREN columnRefOrder (COMMA columnRefOrder)* RPAREN -> columnRefOrder+
+   ;
+
+columnRefOrderNotInParenthese
+   :
+   columnRefOrder (COMMA columnRefOrder)* -> columnRefOrder+
+   ;
+
 // order by a,b
 orderByClause
 @init { gParent.pushMsg("order by clause", state); }
@@ -108,17 +132,17 @@ orderByClause
    :
    KW_ORDER KW_BY
    columnRefOrder
    ( COMMA columnRefOrder)* -> ^(TOK_ORDERBY columnRefOrder+)
    ;
-
+
 clusterByClause
 @init { gParent.pushMsg("cluster by clause", state); }
 @after { gParent.popMsg(state); }
    :
    KW_CLUSTER KW_BY
-   LPAREN expression (COMMA expression)* RPAREN -> ^(TOK_CLUSTERBY expression+)
+   (
+   (LPAREN) => expressionsInParenthese -> ^(TOK_CLUSTERBY expressionsInParenthese)
    |
-   KW_CLUSTER KW_BY
-   expression
-   ( (COMMA)=>COMMA expression )* -> ^(TOK_CLUSTERBY expression+)
+   expressionsNotInParenthese -> ^(TOK_CLUSTERBY expressionsNotInParenthese)
+   )
    ;

 partitionByClause
@@ -126,10 +150,11 @@
 @after { gParent.popMsg(state); }
    :
    KW_PARTITION KW_BY
-   LPAREN expression (COMMA expression)* RPAREN -> ^(TOK_DISTRIBUTEBY expression+)
+   (
+   (LPAREN) => expressionsInParenthese -> ^(TOK_DISTRIBUTEBY expressionsInParenthese)
    |
-   KW_PARTITION KW_BY
-   expression ((COMMA)=> COMMA expression)* -> ^(TOK_DISTRIBUTEBY expression+)
+   expressionsNotInParenthese -> ^(TOK_DISTRIBUTEBY expressionsNotInParenthese)
+   )
    ;

 distributeByClause
@@ -137,10 +162,11 @@
 @after { gParent.popMsg(state); }
    :
    KW_DISTRIBUTE KW_BY
-   LPAREN expression (COMMA expression)* RPAREN -> ^(TOK_DISTRIBUTEBY expression+)
+   (
+   (LPAREN) => expressionsInParenthese -> ^(TOK_DISTRIBUTEBY expressionsInParenthese)
    |
-   KW_DISTRIBUTE KW_BY
-   expression ((COMMA)=> COMMA expression)* -> ^(TOK_DISTRIBUTEBY expression+)
+   expressionsNotInParenthese -> ^(TOK_DISTRIBUTEBY expressionsNotInParenthese)
+   )
    ;

 sortByClause
@@ -148,12 +174,11 @@
 @after { gParent.popMsg(state); }
    :
    KW_SORT KW_BY
-   LPAREN columnRefOrder
-   ( COMMA columnRefOrder)* RPAREN -> ^(TOK_SORTBY columnRefOrder+)
+   (
+   (LPAREN) => columnRefOrderInParenthese -> ^(TOK_SORTBY columnRefOrderInParenthese)
    |
-   KW_SORT KW_BY
-   columnRefOrder
-   ( (COMMA)=> COMMA columnRefOrder)*
+   columnRefOrderNotInParenthese ->
+   ^(TOK_SORTBY columnRefOrderNotInParenthese)
+   )
    ;

// fun(par1, par2, par3)
@@ -552,6 +577,9 @@ identifier
    :
    Identifier
    | nonReserved -> Identifier[$nonReserved.text]
+    // When SQL2011 keywords are supported (hive.support.sql11.keywords=true), useSQL11KeywordsForIdentifier()
+    // returns false, this alternative is disabled, and the words in sql11keywords cannot be used as identifiers.
+    | {useSQL11KeywordsForIdentifier()}? sql11keywords -> Identifier[$sql11keywords.text]
    ;

functionIdentifier
@@ -572,5 +600,39 @@ principalIdentifier

nonReserved
    :
-    KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS | KW_REWRITE | KW_AUTHORIZATION | KW_VALUES | KW_URI | KW_SERVER
+    KW_ADD | KW_ADMIN | KW_AFTER | KW_ANALYZE | KW_ARCHIVE | KW_ASC | KW_BEFORE | KW_BUCKET | KW_BUCKETS
+    | KW_CASCADE | KW_CHANGE | KW_CLUSTER | KW_CLUSTERED | KW_CLUSTERSTATUS | KW_COLLECTION | KW_COLUMNS
+    | KW_COMMENT | KW_COMPACT | KW_COMPACTIONS | KW_COMPUTE | KW_CONCATENATE | KW_CONTINUE | KW_DATA
+    | KW_DATABASES | KW_DATETIME | KW_DBPROPERTIES | KW_DEFERRED | KW_DEFINED | KW_DELIMITED | KW_DEPENDENCY
+    | KW_DESC | KW_DIRECTORIES | KW_DIRECTORY | KW_DISABLE | KW_DISTRIBUTE | KW_ELEM_TYPE | KW_ENABLE
+    | KW_ESCAPED | KW_EXCLUSIVE | KW_EXPLAIN | KW_EXPORT | KW_FIELDS | KW_FILE | KW_FILEFORMAT
+    | KW_FIRST | KW_FORMAT | KW_FORMATTED | KW_FUNCTIONS | KW_HOLD_DDLTIME | KW_IDXPROPERTIES | KW_IGNORE
+    | KW_INDEX | KW_INDEXES | KW_INNER | KW_INPATH | KW_INPUTDRIVER | KW_INPUTFORMAT | KW_ITEMS | KW_JAR
+    | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_LOAD | KW_LOCATION | KW_LOCK | KW_LOCKS | KW_LOGICAL | KW_LONG
+    | KW_MAPJOIN | KW_MATERIALIZED | KW_MINUS | KW_MSCK | KW_NOSCAN | KW_NO_DROP | KW_OFFLINE | KW_OPTION
+    | KW_OUTPUTDRIVER | KW_OUTPUTFORMAT | KW_OVERWRITE | KW_OWNER | KW_PLUS | KW_PRETTY | KW_PRINCIPALS
+    | KW_PROTECTION | KW_PURGE | KW_READ | KW_READONLY | KW_REBUILD | KW_RECORDREADER | KW_RECORDWRITER
+    | KW_REGEXP | KW_RENAME | KW_REPAIR | KW_REPLACE | KW_RESTRICT | KW_REWRITE | KW_RLIKE | KW_ROLE | KW_ROLES
+    | KW_SCHEMA | KW_SCHEMAS | KW_SEMI | KW_SERDE | KW_SERDEPROPERTIES | KW_SERVER | KW_SETS | KW_SHARED
+    | KW_SHOW | KW_SHOW_DATABASE | KW_SKEWED | KW_SORT | KW_SORTED | KW_SSL | KW_STATISTICS | KW_STORED
+    | KW_STREAMTABLE | KW_STRING | KW_STRUCT | KW_TABLES | KW_TBLPROPERTIES | KW_TEMPORARY | KW_TERMINATED
+    | KW_TINYINT | KW_TOUCH | KW_TRANSACTIONS | KW_UNARCHIVE | KW_UNDO | KW_UNIONTYPE | KW_UNLOCK | KW_UNSET
+    | KW_UNSIGNED | KW_URI | KW_USE | KW_UTC | KW_UTCTIMESTAMP | KW_VALUE_TYPE | KW_VIEW | KW_WHILE
+    ;
+
+// The following SQL2011 keywords are used as identifiers in existing q tests; for backward compatibility
+// they can still be parsed as identifiers when hive.support.sql11.keywords=false.
+sql11keywords
+    :
+    // array_map_access_nonconstant.q
+    KW_ARRAY
+    // keyword_1.q, serde_regex.q, ppd_field_garbage.q
+    | KW_USER
+    // date_1.q, date_udf.q, varchar_cast.q, vectorized_casts.q, char_cast.q
+    | KW_TIMESTAMP
+    // date_1.q
+    | KW_DATE
+    // authorization_set_show_current_role.q
+    | KW_ALL
+    // orc_vectorization_ppd.q
+    | KW_INT
    ;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
index a24cad9..debd5ac 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java
@@ -193,6 +193,9 @@ public ASTNode parse(String command, Context ctx, boolean setTokenRewriteStream)
       lexer.setHiveConf(ctx.getConf());
     }
     HiveParser parser = new HiveParser(tokens);
+    if (ctx != null) {
+      parser.setHiveConf(ctx.getConf());
+    }
     parser.setTreeAdaptor(adaptor);
     HiveParser.statement_return r = null;
     try {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
index eba3689..f2d8e1b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SelectClauseParser.g
@@ -35,6 +35,9 @@ k=3;
       RecognitionException e) {
     gParent.errors.add(new ParseError(gParent, e, tokenNames));
   }
+  protected boolean useSQL11KeywordsForIdentifier() {
+    return gParent.useSQL11KeywordsForIdentifier();
+  }
 }

 @rulecatch {
@@ -125,10 +128,11 @@ selectItem
 @init { gParent.pushMsg("selection target", state); }
 @after { gParent.popMsg(state); }
    :
+    (tableAllColumns) => tableAllColumns -> ^(TOK_SELEXPR tableAllColumns)
+    |
    ( expression
      ((KW_AS? identifier) | (KW_AS LPAREN identifier (COMMA identifier)* RPAREN))?
    ) -> ^(TOK_SELEXPR expression identifier*)
-    | tableAllColumns -> ^(TOK_SELEXPR tableAllColumns)
    ;

trfmClause
@@ -148,7 +152,9 @@ selectExpression
 @init { gParent.pushMsg("select expression", state); }
 @after { gParent.popMsg(state); }
    :
-    expression | tableAllColumns
+    (tableAllColumns) => tableAllColumns
+    |
+    expression
    ;

selectExpressionList
diff --git a/ql/src/test/queries/clientpositive/ambiguitycheck.q b/ql/src/test/queries/clientpositive/ambiguitycheck.q
new file mode 100644
index 0000000..9ad239a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/ambiguitycheck.q
@@ -0,0 +1,30 @@
+set hive.cbo.enable=false;
+
+-- check cluster/distribute/partitionBy
+SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) ;
+
+SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),value) ;
+
+SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,(value)) ;
+
+SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(value)) ;
+
+SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(((value))));
+
+-- HIVE-6950
+SELECT tab1.key,
+       tab1.value,
+       SUM(1)
+FROM src as tab1
+GROUP BY tab1.key,
+         tab1.value
+GROUPING SETS ((tab1.key, tab1.value));
+
+SELECT key,
+       src.value,
+       SUM(1)
+FROM src
+GROUP BY key,
+         src.value
+GROUPING SETS ((key, src.value));
+
diff --git a/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q b/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q
index 49c1f54..423afb7 100644
--- a/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q
+++ b/ql/src/test/queries/clientpositive/array_map_access_nonconstant.q
@@ -1,4 +1,6 @@
 set hive.fetch.task.conversion=more;
+set hive.support.sql11.keywords=false;
+-- We need the above setting for backward compatibility because 'array' is a keyword in SQL2011
 create table array_table (array array<string>, index int );

 insert into table array_table select array('first', 'second', 'third'), key%3 from src tablesample (4 rows);
diff --git a/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q b/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
index f10b649..75ba2a2 100644
--- a/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
+++ b/ql/src/test/queries/clientpositive/authorization_set_show_current_role.q
@@ -3,6 +3,9 @@ set hive.security.authorization.manager=org.apache.hadoop.hive.ql.security.autho
 set hive.security.authenticator.manager=org.apache.hadoop.hive.ql.security.SessionStateConfigUserAuthenticator;
 set user.name=hive_admin_user;
 set role ADMIN;
+set hive.support.sql11.keywords=false;
+-- We need the above setting for backward compatibility because 'all' is a keyword in SQL2011
+
 show current roles;

 create role r1;
diff --git a/ql/src/test/queries/clientpositive/char_cast.q b/ql/src/test/queries/clientpositive/char_cast.q
index 7f44d4d..688bbbe 100644
--- a/ql/src/test/queries/clientpositive/char_cast.q
+++ b/ql/src/test/queries/clientpositive/char_cast.q
@@ -1,3 +1,5 @@
+set hive.support.sql11.keywords=false;
+-- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011
 -- Cast from char to other data types
 select
diff --git a/ql/src/test/queries/clientpositive/date_1.q b/ql/src/test/queries/clientpositive/date_1.q
index 7d89ac9..f22303c 100644
--- a/ql/src/test/queries/clientpositive/date_1.q
+++ b/ql/src/test/queries/clientpositive/date_1.q
@@ -1,4 +1,6 @@
 set hive.fetch.task.conversion=more;
+set hive.support.sql11.keywords=false;
+-- We need the above setting for backward compatibility because
'timestamp' is a keyword in SQL2011 drop table date_1; diff --git a/ql/src/test/queries/clientpositive/date_udf.q b/ql/src/test/queries/clientpositive/date_udf.q index c55b9f9..ddf37fe 100644 --- a/ql/src/test/queries/clientpositive/date_udf.q +++ b/ql/src/test/queries/clientpositive/date_udf.q @@ -1,3 +1,6 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + drop table date_udf; drop table date_udf_string; drop table date_udf_flight; diff --git a/ql/src/test/queries/clientpositive/decimal_10_0.q b/ql/src/test/queries/clientpositive/decimal_10_0.q index 02b547c..5bf15ca 100644 --- a/ql/src/test/queries/clientpositive/decimal_10_0.q +++ b/ql/src/test/queries/clientpositive/decimal_10_0.q @@ -1,9 +1,9 @@ -DROP TABLE IF EXISTS DECIMAL; +DROP TABLE IF EXISTS DECIMAL_TABLE; -CREATE TABLE DECIMAL (dec decimal); +CREATE TABLE DECIMAL_TABLE (dec decimal); -LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL; +LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL_TABLE; -SELECT dec FROM DECIMAL; +SELECT dec FROM DECIMAL_TABLE; -DROP TABLE DECIMAL; \ No newline at end of file +DROP TABLE DECIMAL_TABLE; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/keyword_1.q b/ql/src/test/queries/clientpositive/keyword_1.q index 2e996af..c96529a 100644 --- a/ql/src/test/queries/clientpositive/keyword_1.q +++ b/ql/src/test/queries/clientpositive/keyword_1.q @@ -1,4 +1,7 @@ +set hive.support.sql11.keywords=false; + -- SORT_BEFORE_DIFF +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 create table test_user (user string, `group` string); grant select on table test_user to user hive_test; diff --git a/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q b/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q deleted file mode 100644 index e33b4bf..0000000 --- a/ql/src/test/queries/clientpositive/nonreserved_keywords_input37.q +++ /dev/null @@ -1,12 +0,0 @@ -CREATE TABLE table(string string) STORED AS TEXTFILE; - -LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE table; - -SELECT table, count(1) -FROM -( - FROM table - SELECT TRANSFORM (table.string) - USING 'java -cp ../util/target/classes/ org.apache.hadoop.hive.scripts.extracturl' AS (table, count) -) subq -GROUP BY table; diff --git a/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q b/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q deleted file mode 100644 index 144cfee..0000000 --- a/ql/src/test/queries/clientpositive/nonreserved_keywords_insert_into1.q +++ /dev/null @@ -1,26 +0,0 @@ -DROP TABLE insert; - -CREATE TABLE insert (key INT, as STRING); - -EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; -INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; -SELECT SUM(HASH(hash)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM insert -) t; - -EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; -INSERT INTO TABLE insert SELECT * FROM src LIMIT 100; -SELECT SUM(HASH(sum)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM insert -) t; - -SELECT COUNT(*) FROM insert; - -EXPLAIN INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10; -INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10; -SELECT SUM(HASH(add)) FROM ( - SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM insert -) t; - - -DROP TABLE insert; 
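
As an aside, here is a minimal q-test-style sketch of what the new flag controls (illustrative only, not one of the files in this patch; the table name sql11_demo is made up). Under the default hive.support.sql11.keywords=true, SQL2011 reserved words such as 'user' are rejected as identifiers, which is exactly why the q tests in this patch set the flag to false:

-- Illustration only: under the default hive.support.sql11.keywords=true,
-- the next statement fails with a ParseException because 'user' is reserved:
--   create table sql11_demo (user string);

-- Turning the flag off restores the pre-patch behavior:
set hive.support.sql11.keywords=false;
create table sql11_demo (user string);
drop table sql11_demo;
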
diff --git a/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q b/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q index 9bdad86..ba0720a 100644 --- a/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q +++ b/ql/src/test/queries/clientpositive/orc_vectorization_ppd.q @@ -1,3 +1,6 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011 + -- create table with 1000 rows create table srcorc(key string, value string) stored as textfile; insert overwrite table srcorc select * from src; diff --git a/ql/src/test/queries/clientpositive/ppd_field_garbage.q b/ql/src/test/queries/clientpositive/ppd_field_garbage.q index 23e0778..173d2c7 100644 --- a/ql/src/test/queries/clientpositive/ppd_field_garbage.q +++ b/ql/src/test/queries/clientpositive/ppd_field_garbage.q @@ -1,3 +1,5 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 -- ppd leaves invalid expr in field expr CREATE TABLE test_issue (fileid int, infos ARRAY>, test_c STRUCT>); CREATE VIEW v_test_issue AS SELECT fileid, i.user, test_c.user_c.age FROM test_issue LATERAL VIEW explode(infos) info AS i; diff --git a/ql/src/test/queries/clientpositive/serde_regex.q b/ql/src/test/queries/clientpositive/serde_regex.q index accdb54..5eee853 100644 --- a/ql/src/test/queries/clientpositive/serde_regex.q +++ b/ql/src/test/queries/clientpositive/serde_regex.q @@ -1,3 +1,6 @@ +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 + EXPLAIN CREATE TABLE serde_regex( host STRING, diff --git a/ql/src/test/queries/clientpositive/union_remove_3.q b/ql/src/test/queries/clientpositive/union_remove_3.q index 7e1b113..63b5183 100644 --- a/ql/src/test/queries/clientpositive/union_remove_3.q +++ b/ql/src/test/queries/clientpositive/union_remove_3.q @@ -19,7 +19,7 @@ set mapred.input.dir.recursive=true; -- to run the test only on hadoop 23 create table inputTbl1(key string, val string) stored as textfile; -create table outputTbl1(key string, values bigint) stored as textfile; +create table outputTbl1(key string, vals bigint) stored as textfile; load data local inpath '../../data/files/T1.txt' into table inputTbl1; @@ -27,25 +27,25 @@ explain insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a; insert overwrite table outputTbl1 SELECT * FROM ( - SELECT key, 1 as values from inputTbl1 + SELECT key, 1 as vals from inputTbl1 UNION ALL - SELECT key, 2 as values from inputTbl1 + SELECT key, 2 as vals from inputTbl1 UNION ALL - SELECT key, 3 as values from inputTbl1 + SELECT key, 3 as vals from inputTbl1 ) a; desc formatted outputTbl1; set hive.input.format=org.apache.hadoop.hive.ql.io.HiveInputFormat; -select * from outputTbl1 order by key, values; +select * from outputTbl1 order by key, vals; diff --git a/ql/src/test/queries/clientpositive/varchar_cast.q b/ql/src/test/queries/clientpositive/varchar_cast.q index c356b1d..f428eab 100644 --- a/ql/src/test/queries/clientpositive/varchar_cast.q +++ b/ql/src/test/queries/clientpositive/varchar_cast.q @@ -1,4 +1,6 @@ set hive.fetch.task.conversion=more; +set hive.support.sql11.keywords=false; +-- We need 
the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 -- Cast from varchar to other data types select diff --git a/ql/src/test/queries/clientpositive/vectorized_casts.q b/ql/src/test/queries/clientpositive/vectorized_casts.q index 3f818b1..f5618f3 100644 --- a/ql/src/test/queries/clientpositive/vectorized_casts.q +++ b/ql/src/test/queries/clientpositive/vectorized_casts.q @@ -1,4 +1,6 @@ SET hive.vectorized.execution.enabled = true; +set hive.support.sql11.keywords=false; +-- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 -- Test type casting in vectorized mode to verify end-to-end functionality. diff --git a/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out b/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out index 4808433..618cba6 100644 --- a/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out +++ b/ql/src/test/results/clientnegative/authorization_cannot_create_none_role.q.out @@ -2,6 +2,4 @@ PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES -PREHOOK: query: create role None -PREHOOK: type: CREATEROLE -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Role name cannot be one of the reserved roles: [ALL, DEFAULT, NONE] +FAILED: ParseException line 2:12 cannot recognize input near 'None' '' '' in create role diff --git a/ql/src/test/results/clientpositive/ambiguitycheck.q.out b/ql/src/test/results/clientpositive/ambiguitycheck.q.out new file mode 100644 index 0000000..614e6ba --- /dev/null +++ b/ql/src/test/results/clientpositive/ambiguitycheck.q.out @@ -0,0 +1,707 @@ +PREHOOK: query: -- check cluster/distribute/partitionBy +SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: -- check cluster/distribute/partitionBy +SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),value) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),value) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,(value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,(value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +20 val_20 +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(((value)))) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY ((key),(((value)))) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### 
A masked pattern was here #### +20 val_20 +PREHOOK: query: -- HIVE-6950 +SELECT tab1.key, + tab1.value, + SUM(1) +FROM src as tab1 +GROUP BY tab1.key, + tab1.value +GROUPING SETS ((tab1.key, tab1.value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: -- HIVE-6950 +SELECT tab1.key, + tab1.value, + SUM(1) +FROM src as tab1 +GROUP BY tab1.key, + tab1.value +GROUPING SETS ((tab1.key, tab1.value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 3 +10 val_10 1 +100 val_100 2 +103 val_103 2 +104 val_104 2 +105 val_105 1 +11 val_11 1 +111 val_111 1 +113 val_113 2 +114 val_114 1 +116 val_116 1 +118 val_118 2 +119 val_119 3 +12 val_12 2 +120 val_120 2 +125 val_125 2 +126 val_126 1 +128 val_128 3 +129 val_129 2 +131 val_131 1 +133 val_133 1 +134 val_134 2 +136 val_136 1 +137 val_137 2 +138 val_138 4 +143 val_143 1 +145 val_145 1 +146 val_146 2 +149 val_149 2 +15 val_15 2 +150 val_150 1 +152 val_152 2 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 2 +165 val_165 2 +166 val_166 1 +167 val_167 3 +168 val_168 1 +169 val_169 4 +17 val_17 1 +170 val_170 1 +172 val_172 2 +174 val_174 2 +175 val_175 2 +176 val_176 2 +177 val_177 1 +178 val_178 1 +179 val_179 2 +18 val_18 2 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 3 +189 val_189 1 +19 val_19 1 +190 val_190 1 +191 val_191 2 +192 val_192 1 +193 val_193 3 +194 val_194 1 +195 val_195 2 +196 val_196 1 +197 val_197 2 +199 val_199 3 +2 val_2 1 +20 val_20 1 +200 val_200 2 +201 val_201 1 +202 val_202 1 +203 val_203 2 +205 val_205 2 +207 val_207 2 +208 val_208 3 +209 val_209 2 +213 val_213 2 +214 val_214 1 +216 val_216 2 +217 val_217 2 +218 val_218 1 +219 val_219 2 +221 val_221 2 +222 val_222 1 +223 val_223 2 +224 val_224 2 +226 val_226 1 +228 val_228 1 +229 val_229 2 +230 val_230 5 +233 val_233 2 +235 val_235 1 +237 val_237 2 +238 val_238 2 +239 val_239 2 +24 val_24 2 +241 val_241 1 +242 val_242 2 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 2 +256 val_256 2 +257 val_257 1 +258 val_258 1 +26 val_26 2 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 2 +266 val_266 1 +27 val_27 1 +272 val_272 2 +273 val_273 3 +274 val_274 1 +275 val_275 1 +277 val_277 4 +278 val_278 2 +28 val_28 1 +280 val_280 2 +281 val_281 2 +282 val_282 2 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 2 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 3 +30 val_30 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 2 +308 val_308 1 +309 val_309 2 +310 val_310 1 +311 val_311 3 +315 val_315 1 +316 val_316 3 +317 val_317 2 +318 val_318 3 +321 val_321 2 +322 val_322 2 +323 val_323 1 +325 val_325 2 +327 val_327 3 +33 val_33 1 +331 val_331 2 +332 val_332 1 +333 val_333 2 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +34 val_34 1 +341 val_341 1 +342 val_342 2 +344 val_344 2 +345 val_345 1 +348 val_348 5 +35 val_35 3 +351 val_351 1 +353 val_353 2 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 val_367 2 +368 val_368 1 +369 val_369 3 +37 val_37 2 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 2 +384 val_384 3 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 2 +396 val_396 3 +397 val_397 2 +399 val_399 2 +4 
val_4 1 +400 val_400 1 +401 val_401 5 +402 val_402 1 +403 val_403 3 +404 val_404 2 +406 val_406 4 +407 val_407 1 +409 val_409 3 +41 val_41 1 +411 val_411 1 +413 val_413 2 +414 val_414 2 +417 val_417 3 +418 val_418 1 +419 val_419 1 +42 val_42 2 +421 val_421 1 +424 val_424 2 +427 val_427 1 +429 val_429 2 +43 val_43 1 +430 val_430 3 +431 val_431 3 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 3 +439 val_439 2 +44 val_44 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 3 +455 val_455 1 +457 val_457 1 +458 val_458 2 +459 val_459 2 +460 val_460 1 +462 val_462 2 +463 val_463 2 +466 val_466 3 +467 val_467 1 +468 val_468 4 +469 val_469 5 +47 val_47 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 2 +479 val_479 1 +480 val_480 3 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 4 +490 val_490 1 +491 val_491 1 +492 val_492 2 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 3 +5 val_5 3 +51 val_51 2 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 2 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 2 +69 val_69 1 +70 val_70 3 +72 val_72 2 +74 val_74 1 +76 val_76 2 +77 val_77 1 +78 val_78 1 +8 val_8 1 +80 val_80 1 +82 val_82 1 +83 val_83 2 +84 val_84 2 +85 val_85 1 +86 val_86 1 +87 val_87 1 +9 val_9 1 +90 val_90 3 +92 val_92 1 +95 val_95 2 +96 val_96 1 +97 val_97 2 +98 val_98 2 +PREHOOK: query: SELECT key, + src.value, + SUM(1) +FROM src +GROUP BY key, + src.value +GROUPING SETS ((key, src.value)) +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT key, + src.value, + SUM(1) +FROM src +GROUP BY key, + src.value +GROUPING SETS ((key, src.value)) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 3 +10 val_10 1 +100 val_100 2 +103 val_103 2 +104 val_104 2 +105 val_105 1 +11 val_11 1 +111 val_111 1 +113 val_113 2 +114 val_114 1 +116 val_116 1 +118 val_118 2 +119 val_119 3 +12 val_12 2 +120 val_120 2 +125 val_125 2 +126 val_126 1 +128 val_128 3 +129 val_129 2 +131 val_131 1 +133 val_133 1 +134 val_134 2 +136 val_136 1 +137 val_137 2 +138 val_138 4 +143 val_143 1 +145 val_145 1 +146 val_146 2 +149 val_149 2 +15 val_15 2 +150 val_150 1 +152 val_152 2 +153 val_153 1 +155 val_155 1 +156 val_156 1 +157 val_157 1 +158 val_158 1 +160 val_160 1 +162 val_162 1 +163 val_163 1 +164 val_164 2 +165 val_165 2 +166 val_166 1 +167 val_167 3 +168 val_168 1 +169 val_169 4 +17 val_17 1 +170 val_170 1 +172 val_172 2 +174 val_174 2 +175 val_175 2 +176 val_176 2 +177 val_177 1 +178 val_178 1 +179 val_179 2 +18 val_18 2 +180 val_180 1 +181 val_181 1 +183 val_183 1 +186 val_186 1 +187 val_187 3 +189 val_189 1 +19 val_19 1 +190 val_190 1 +191 val_191 2 +192 val_192 1 +193 val_193 3 +194 val_194 1 +195 val_195 2 +196 val_196 1 +197 val_197 2 +199 val_199 3 +2 val_2 1 +20 val_20 1 +200 val_200 2 +201 val_201 1 +202 val_202 1 +203 val_203 2 +205 val_205 2 +207 val_207 2 +208 val_208 3 +209 val_209 2 +213 val_213 2 +214 val_214 1 +216 val_216 2 +217 val_217 2 +218 val_218 1 +219 val_219 2 +221 val_221 2 +222 val_222 1 +223 val_223 2 +224 val_224 2 +226 val_226 1 +228 val_228 1 +229 val_229 2 +230 val_230 5 +233 val_233 2 +235 val_235 1 +237 val_237 2 +238 val_238 2 +239 val_239 2 +24 val_24 2 +241 val_241 1 +242 val_242 2 +244 val_244 1 +247 val_247 1 +248 val_248 1 +249 val_249 1 +252 val_252 1 +255 val_255 2 +256 
val_256 2 +257 val_257 1 +258 val_258 1 +26 val_26 2 +260 val_260 1 +262 val_262 1 +263 val_263 1 +265 val_265 2 +266 val_266 1 +27 val_27 1 +272 val_272 2 +273 val_273 3 +274 val_274 1 +275 val_275 1 +277 val_277 4 +278 val_278 2 +28 val_28 1 +280 val_280 2 +281 val_281 2 +282 val_282 2 +283 val_283 1 +284 val_284 1 +285 val_285 1 +286 val_286 1 +287 val_287 1 +288 val_288 2 +289 val_289 1 +291 val_291 1 +292 val_292 1 +296 val_296 1 +298 val_298 3 +30 val_30 1 +302 val_302 1 +305 val_305 1 +306 val_306 1 +307 val_307 2 +308 val_308 1 +309 val_309 2 +310 val_310 1 +311 val_311 3 +315 val_315 1 +316 val_316 3 +317 val_317 2 +318 val_318 3 +321 val_321 2 +322 val_322 2 +323 val_323 1 +325 val_325 2 +327 val_327 3 +33 val_33 1 +331 val_331 2 +332 val_332 1 +333 val_333 2 +335 val_335 1 +336 val_336 1 +338 val_338 1 +339 val_339 1 +34 val_34 1 +341 val_341 1 +342 val_342 2 +344 val_344 2 +345 val_345 1 +348 val_348 5 +35 val_35 3 +351 val_351 1 +353 val_353 2 +356 val_356 1 +360 val_360 1 +362 val_362 1 +364 val_364 1 +365 val_365 1 +366 val_366 1 +367 val_367 2 +368 val_368 1 +369 val_369 3 +37 val_37 2 +373 val_373 1 +374 val_374 1 +375 val_375 1 +377 val_377 1 +378 val_378 1 +379 val_379 1 +382 val_382 2 +384 val_384 3 +386 val_386 1 +389 val_389 1 +392 val_392 1 +393 val_393 1 +394 val_394 1 +395 val_395 2 +396 val_396 3 +397 val_397 2 +399 val_399 2 +4 val_4 1 +400 val_400 1 +401 val_401 5 +402 val_402 1 +403 val_403 3 +404 val_404 2 +406 val_406 4 +407 val_407 1 +409 val_409 3 +41 val_41 1 +411 val_411 1 +413 val_413 2 +414 val_414 2 +417 val_417 3 +418 val_418 1 +419 val_419 1 +42 val_42 2 +421 val_421 1 +424 val_424 2 +427 val_427 1 +429 val_429 2 +43 val_43 1 +430 val_430 3 +431 val_431 3 +432 val_432 1 +435 val_435 1 +436 val_436 1 +437 val_437 1 +438 val_438 3 +439 val_439 2 +44 val_44 1 +443 val_443 1 +444 val_444 1 +446 val_446 1 +448 val_448 1 +449 val_449 1 +452 val_452 1 +453 val_453 1 +454 val_454 3 +455 val_455 1 +457 val_457 1 +458 val_458 2 +459 val_459 2 +460 val_460 1 +462 val_462 2 +463 val_463 2 +466 val_466 3 +467 val_467 1 +468 val_468 4 +469 val_469 5 +47 val_47 1 +470 val_470 1 +472 val_472 1 +475 val_475 1 +477 val_477 1 +478 val_478 2 +479 val_479 1 +480 val_480 3 +481 val_481 1 +482 val_482 1 +483 val_483 1 +484 val_484 1 +485 val_485 1 +487 val_487 1 +489 val_489 4 +490 val_490 1 +491 val_491 1 +492 val_492 2 +493 val_493 1 +494 val_494 1 +495 val_495 1 +496 val_496 1 +497 val_497 1 +498 val_498 3 +5 val_5 3 +51 val_51 2 +53 val_53 1 +54 val_54 1 +57 val_57 1 +58 val_58 2 +64 val_64 1 +65 val_65 1 +66 val_66 1 +67 val_67 2 +69 val_69 1 +70 val_70 3 +72 val_72 2 +74 val_74 1 +76 val_76 2 +77 val_77 1 +78 val_78 1 +8 val_8 1 +80 val_80 1 +82 val_82 1 +83 val_83 2 +84 val_84 2 +85 val_85 1 +86 val_86 1 +87 val_87 1 +9 val_9 1 +90 val_90 3 +92 val_92 1 +95 val_95 2 +96 val_96 1 +97 val_97 2 +98 val_98 2 diff --git a/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out b/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out index 4ac4320..00dd9b1 100644 --- a/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out +++ b/ql/src/test/results/clientpositive/authorization_set_show_current_role.q.out @@ -2,9 +2,13 @@ PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES -PREHOOK: query: show current roles +PREHOOK: query: -- We need the above setting for backward compatibility because 'all' is a keyword in SQL2011 + +show current roles 
PREHOOK: type: SHOW_ROLES -POSTHOOK: query: show current roles +POSTHOOK: query: -- We need the above setting for backward compatibility because 'all' is a keyword in SQL2011 + +show current roles POSTHOOK: type: SHOW_ROLES admin PREHOOK: query: create role r1 diff --git a/ql/src/test/results/clientpositive/char_cast.q.out b/ql/src/test/results/clientpositive/char_cast.q.out index 025fedb..f225ced 100644 --- a/ql/src/test/results/clientpositive/char_cast.q.out +++ b/ql/src/test/results/clientpositive/char_cast.q.out @@ -1,4 +1,6 @@ -PREHOOK: query: -- Cast from char to other data types +PREHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + +-- Cast from char to other data types select cast(cast('11' as string) as tinyint), cast(cast('11' as string) as smallint), @@ -11,7 +13,9 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Cast from char to other data types +POSTHOOK: query: -- We need the above setting for backward compatibility because 'date' is a keyword in SQL2011 + +-- Cast from char to other data types select cast(cast('11' as string) as tinyint), cast(cast('11' as string) as smallint), diff --git a/ql/src/test/results/clientpositive/date_1.q.out b/ql/src/test/results/clientpositive/date_1.q.out index df9fc47..51fc29c 100644 --- a/ql/src/test/results/clientpositive/date_1.q.out +++ b/ql/src/test/results/clientpositive/date_1.q.out @@ -1,6 +1,10 @@ -PREHOOK: query: drop table date_1 +PREHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +drop table date_1 PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table date_1 +POSTHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +drop table date_1 POSTHOOK: type: DROPTABLE PREHOOK: query: create table date_1 (d date) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/date_udf.q.out b/ql/src/test/results/clientpositive/date_udf.q.out index 9b37da6..99c650a 100644 --- a/ql/src/test/results/clientpositive/date_udf.q.out +++ b/ql/src/test/results/clientpositive/date_udf.q.out @@ -1,6 +1,10 @@ -PREHOOK: query: drop table date_udf +PREHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +drop table date_udf PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table date_udf +POSTHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011 + +drop table date_udf POSTHOOK: type: DROPTABLE PREHOOK: query: drop table date_udf_string PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/decimal_10_0.q.out b/ql/src/test/results/clientpositive/decimal_10_0.q.out index ae3426c..ca313dd 100644 --- a/ql/src/test/results/clientpositive/decimal_10_0.q.out +++ b/ql/src/test/results/clientpositive/decimal_10_0.q.out @@ -1,38 +1,38 @@ -PREHOOK: query: DROP TABLE IF EXISTS DECIMAL +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_TABLE PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_TABLE POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE DECIMAL (dec decimal) +PREHOOK: query: CREATE TABLE DECIMAL_TABLE (dec decimal) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@DECIMAL -POSTHOOK: query: CREATE TABLE DECIMAL (dec decimal) +PREHOOK: Output: 
default@DECIMAL_TABLE +POSTHOOK: query: CREATE TABLE DECIMAL_TABLE (dec decimal) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@DECIMAL -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL +POSTHOOK: Output: default@DECIMAL_TABLE +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL_TABLE PREHOOK: type: LOAD #### A masked pattern was here #### -PREHOOK: Output: default@decimal -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL +PREHOOK: Output: default@decimal_table +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/decimal_10_0.txt' OVERWRITE INTO TABLE DECIMAL_TABLE POSTHOOK: type: LOAD #### A masked pattern was here #### -POSTHOOK: Output: default@decimal -PREHOOK: query: SELECT dec FROM DECIMAL +POSTHOOK: Output: default@decimal_table +PREHOOK: query: SELECT dec FROM DECIMAL_TABLE PREHOOK: type: QUERY -PREHOOK: Input: default@decimal +PREHOOK: Input: default@decimal_table #### A masked pattern was here #### -POSTHOOK: query: SELECT dec FROM DECIMAL +POSTHOOK: query: SELECT dec FROM DECIMAL_TABLE POSTHOOK: type: QUERY -POSTHOOK: Input: default@decimal +POSTHOOK: Input: default@decimal_table #### A masked pattern was here #### 1000000000 NULL -PREHOOK: query: DROP TABLE DECIMAL +PREHOOK: query: DROP TABLE DECIMAL_TABLE PREHOOK: type: DROPTABLE -PREHOOK: Input: default@decimal -PREHOOK: Output: default@decimal -POSTHOOK: query: DROP TABLE DECIMAL +PREHOOK: Input: default@decimal_table +PREHOOK: Output: default@decimal_table +POSTHOOK: query: DROP TABLE DECIMAL_TABLE POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@decimal -POSTHOOK: Output: default@decimal +POSTHOOK: Input: default@decimal_table +POSTHOOK: Output: default@decimal_table diff --git a/ql/src/test/results/clientpositive/keyword_1.q.out b/ql/src/test/results/clientpositive/keyword_1.q.out index 135d8e5..7e80c32 100644 --- a/ql/src/test/results/clientpositive/keyword_1.q.out +++ b/ql/src/test/results/clientpositive/keyword_1.q.out @@ -1,10 +1,12 @@ PREHOOK: query: -- SORT_BEFORE_DIFF +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 create table test_user (user string, `group` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_user POSTHOOK: query: -- SORT_BEFORE_DIFF +-- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011 create table test_user (user string, `group` string) POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/nonreserved_keywords_input37.q.out b/ql/src/test/results/clientpositive/nonreserved_keywords_input37.q.out deleted file mode 100644 index 819da22..0000000 --- a/ql/src/test/results/clientpositive/nonreserved_keywords_input37.q.out +++ /dev/null @@ -1,40 +0,0 @@ -PREHOOK: query: CREATE TABLE table(string string) STORED AS TEXTFILE -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@table -POSTHOOK: query: CREATE TABLE table(string string) STORED AS TEXTFILE -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@table -PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE table -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@table -POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/docurl.txt' INTO TABLE 
table -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@table -PREHOOK: query: SELECT table, count(1) -FROM -( - FROM table - SELECT TRANSFORM (table.string) -#### A masked pattern was here #### -) subq -GROUP BY table -PREHOOK: type: QUERY -PREHOOK: Input: default@table -#### A masked pattern was here #### -POSTHOOK: query: SELECT table, count(1) -FROM -( - FROM table - SELECT TRANSFORM (table.string) -#### A masked pattern was here #### -) subq -GROUP BY table -POSTHOOK: type: QUERY -POSTHOOK: Input: default@table -#### A masked pattern was here #### -1uauniajqtunlsvadmxhlxvngxpqjuzbpzvdiwmzphmbaicduzkgxgtdeiunduosu.html 4 -4uzsbtwvdypfitqfqdjosynqp.html 4 diff --git a/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out b/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out deleted file mode 100644 index 9f075f1..0000000 --- a/ql/src/test/results/clientpositive/nonreserved_keywords_insert_into1.q.out +++ /dev/null @@ -1,281 +0,0 @@ -PREHOOK: query: DROP TABLE insert -PREHOOK: type: DROPTABLE -POSTHOOK: query: DROP TABLE insert -POSTHOOK: type: DROPTABLE -PREHOOK: query: CREATE TABLE insert (key INT, as STRING) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@insert -POSTHOOK: query: CREATE TABLE insert (key INT, as STRING) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@insert -PREHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 -PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100 -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - sort order: - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string) - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: string), VALUE._col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - Limit - Number of rows: 100 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.insert - - Stage: Stage-0 - Move Operator - tables: - replace: false - table: - input format: org.apache.hadoop.mapred.TextInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.insert
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-
-PREHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@insert
-POSTHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@insert
-POSTHOOK: Lineage: insert.as SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: insert.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT SUM(HASH(hash)) FROM (
-  SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM insert
-) t
-PREHOOK: type: QUERY
-PREHOOK: Input: default@insert
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT SUM(HASH(hash)) FROM (
-  SELECT TRANSFORM(*) USING 'tr \t _' AS (hash) FROM insert
-) t
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@insert
-#### A masked pattern was here ####
-10226524244
-PREHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert SELECT * FROM src LIMIT 100
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Limit
-                Number of rows: 100
-                Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 100
-            Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 100 Data size: 1000 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.insert
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: false
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.insert
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-
-PREHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@insert
-POSTHOOK: query: INSERT INTO TABLE insert SELECT * FROM src LIMIT 100
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@insert
-POSTHOOK: Lineage: insert.as SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: insert.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT SUM(HASH(sum)) FROM (
-  SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM insert
-) t
-PREHOOK: type: QUERY
-PREHOOK: Input: default@insert
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT SUM(HASH(sum)) FROM (
-  SELECT TRANSFORM(*) USING 'tr \t _' AS (sum) FROM insert
-) t
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@insert
-#### A masked pattern was here ####
-20453048488
-PREHOOK: query: SELECT COUNT(*) FROM insert
-PREHOOK: type: QUERY
-PREHOOK: Input: default@insert
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT COUNT(*) FROM insert
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@insert
-#### A masked pattern was here ####
-200
-PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10
-PREHOOK: type: QUERY
-POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-  Stage-2 depends on stages: Stage-0
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: src
-            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Limit
-                Number of rows: 10
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                  value expressions: _col0 (type: string), _col1 (type: string)
-      Reduce Operator Tree:
-        Select Operator
-          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
-          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-          Limit
-            Number of rows: 10
-            Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: UDFToInteger(_col0) (type: int), _col1 (type: string)
-              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator
-                compressed: false
-                Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE
-                table:
-                    input format: org.apache.hadoop.mapred.TextInputFormat
-                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-                    name: default.insert
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
-          table:
-              input format: org.apache.hadoop.mapred.TextInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              name: default.insert
-
-  Stage: Stage-2
-    Stats-Aggr Operator
-
-PREHOOK: query: INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10
-PREHOOK: type: QUERY
-PREHOOK: Input: default@src
-PREHOOK: Output: default@insert
-POSTHOOK: query: INSERT OVERWRITE TABLE insert SELECT * FROM src LIMIT 10
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@src
-POSTHOOK: Output: default@insert
-POSTHOOK: Lineage: insert.as SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
-POSTHOOK: Lineage: insert.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
-PREHOOK: query: SELECT SUM(HASH(add)) FROM (
-  SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM insert
-) t
-PREHOOK: type: QUERY
-PREHOOK: Input: default@insert
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT SUM(HASH(add)) FROM (
-  SELECT TRANSFORM(*) USING 'tr \t _' AS (add) FROM insert
-) t
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@insert
-#### A masked pattern was here ####
--826625916
-PREHOOK: query: DROP TABLE insert
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@insert
-PREHOOK: Output: default@insert
-POSTHOOK: query: DROP TABLE insert
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@insert
-POSTHOOK: Output: default@insert
diff --git a/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out b/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out
index 738abc4..4b958cc 100644
--- a/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out
+++ b/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out
@@ -1,9 +1,13 @@
-PREHOOK: query: -- create table with 1000 rows
+PREHOOK: query: -- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011
+
+-- create table with 1000 rows
 create table srcorc(key string, value string) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@srcorc
-POSTHOOK: query: -- create table with 1000 rows
+POSTHOOK: query: -- We need the above setting for backward compatibility because 'int' is a keyword in SQL2011
+
+-- create table with 1000 rows
 create table srcorc(key string, value string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
diff --git a/ql/src/test/results/clientpositive/ppd_field_garbage.q.out b/ql/src/test/results/clientpositive/ppd_field_garbage.q.out
index 86eca5b..1ce7a39 100644
--- a/ql/src/test/results/clientpositive/ppd_field_garbage.q.out
+++ b/ql/src/test/results/clientpositive/ppd_field_garbage.q.out
@@ -1,9 +1,11 @@
-PREHOOK: query: -- ppd leaves invalid expr in field expr
+PREHOOK: query: -- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011
+-- ppd leaves invalid expr in field expr
 CREATE TABLE test_issue (fileid int, infos ARRAY<STRUCT<user:int>>, test_c STRUCT<user_c:STRUCT<age:int>>)
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@test_issue
-POSTHOOK: query: -- ppd leaves invalid expr in field expr
+POSTHOOK: query: -- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011
+-- ppd leaves invalid expr in field expr
 CREATE TABLE test_issue (fileid int, infos ARRAY<STRUCT<user:int>>, test_c STRUCT<user_c:STRUCT<age:int>>)
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
diff --git a/ql/src/test/results/clientpositive/serde_regex.q.out b/ql/src/test/results/clientpositive/serde_regex.q.out
index 19187ba..d807ab3 100644
--- a/ql/src/test/results/clientpositive/serde_regex.q.out
+++ b/ql/src/test/results/clientpositive/serde_regex.q.out
@@ -1,4 +1,6 @@
-PREHOOK: query: EXPLAIN
+PREHOOK: query: -- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011
+
+EXPLAIN
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
@@ -15,7 +17,9 @@ WITH SERDEPROPERTIES (
 )
 STORED AS TEXTFILE
 PREHOOK: type: CREATETABLE
-POSTHOOK: query: EXPLAIN
+POSTHOOK: query: -- We need the above setting for backward compatibility because 'user' is a keyword in SQL2011
+
+EXPLAIN
 CREATE TABLE serde_regex(
   host STRING,
   identity STRING,
diff --git a/ql/src/test/results/clientpositive/union_remove_3.q.out b/ql/src/test/results/clientpositive/union_remove_3.q.out
index e210461..8376364 100644
--- a/ql/src/test/results/clientpositive/union_remove_3.q.out
+++ b/ql/src/test/results/clientpositive/union_remove_3.q.out
@@ -28,11 +28,11 @@ create table inputTbl1(key string, val string) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@inputTbl1
-PREHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile
+PREHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile
 PREHOOK: type: CREATETABLE
 PREHOOK: Output: database:default
 PREHOOK: Output: default@outputTbl1
-POSTHOOK: query: create table outputTbl1(key string, values bigint) stored as textfile
+POSTHOOK: query: create table outputTbl1(key string, vals bigint) stored as textfile
 POSTHOOK: type: CREATETABLE
 POSTHOOK: Output: database:default
 POSTHOOK: Output: default@outputTbl1
@@ -48,22 +48,22 @@ PREHOOK: query: explain
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-  SELECT key, 1 as values from inputTbl1
+  SELECT key, 1 as vals from inputTbl1
   UNION ALL
-  SELECT key, 2 as values from inputTbl1
+  SELECT key, 2 as vals from inputTbl1
   UNION ALL
-  SELECT key, 3 as values from inputTbl1
+  SELECT key, 3 as vals from inputTbl1
 ) a
 PREHOOK: type: QUERY
 POSTHOOK: query: explain
 insert overwrite table outputTbl1
 SELECT * FROM
 (
-  SELECT key, 1 as values from inputTbl1
+  SELECT key, 1 as vals from inputTbl1
   UNION ALL
-  SELECT key, 2 as values from inputTbl1
+  SELECT key, 2 as vals from inputTbl1
   UNION ALL
-  SELECT key, 3 as values from inputTbl1
+  SELECT key, 3 as vals from inputTbl1
 ) a
 POSTHOOK: type: QUERY
 STAGE DEPENDENCIES:
@@ -151,11 +151,11 @@ STAGE PLANS:
 PREHOOK: query: insert overwrite table outputTbl1
 SELECT * FROM
 (
-  SELECT key, 1 as values from inputTbl1
+  SELECT key, 1 as vals from inputTbl1
   UNION ALL
-  SELECT key, 2 as values from inputTbl1
+  SELECT key, 2 as vals from inputTbl1
   UNION ALL
-  SELECT key, 3 as values from inputTbl1
+  SELECT key, 3 as vals from inputTbl1
 ) a
 PREHOOK: type: QUERY
 PREHOOK: Input: default@inputtbl1
@@ -163,17 +163,17 @@ PREHOOK: Output: default@outputtbl1
 POSTHOOK: query: insert overwrite table outputTbl1
 SELECT * FROM
 (
-  SELECT key, 1 as values from inputTbl1
+  SELECT key, 1 as vals from inputTbl1
   UNION ALL
-  SELECT key, 2 as values from inputTbl1
+  SELECT key, 2 as vals from inputTbl1
   UNION ALL
-  SELECT key, 3 as values from inputTbl1
+  SELECT key, 3 as vals from inputTbl1
 ) a
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@inputtbl1
 POSTHOOK: Output: default@outputtbl1
 POSTHOOK: Lineage: outputtbl1.key EXPRESSION [(inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), (inputtbl1)inputtbl1.FieldSchema(name:key, type:string, comment:null), ]
-POSTHOOK: Lineage: outputtbl1.values EXPRESSION []
+POSTHOOK: Lineage: outputtbl1.vals EXPRESSION []
 PREHOOK: query: desc formatted outputTbl1
 PREHOOK: type: DESCTABLE
 PREHOOK: Input: default@outputtbl1
@@ -183,7 +183,7 @@ POSTHOOK: Input: default@outputtbl1
 # col_name	data_type	comment
 	 	 
 key	string	
-values	bigint	
+vals	bigint	
 	 	 
 # Detailed Table Information
 Database:	default
@@ -210,11 +210,11 @@ Bucket Columns:	[]
 Sort Columns:	[]
 Storage Desc Params:
 	serialization.format	1
-PREHOOK: query: select * from outputTbl1 order by key, values
+PREHOOK: query: select * from outputTbl1 order by key, vals
 PREHOOK: type: QUERY
 PREHOOK: Input: default@outputtbl1
 #### A masked pattern was here ####
-POSTHOOK: query: select * from outputTbl1 order by key, values
+POSTHOOK: query: select * from outputTbl1 order by key, vals
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@outputtbl1
 #### A masked pattern was here ####
diff --git a/ql/src/test/results/clientpositive/varchar_cast.q.out b/ql/src/test/results/clientpositive/varchar_cast.q.out
index 5a968f2..975b08a 100644
--- a/ql/src/test/results/clientpositive/varchar_cast.q.out
+++ b/ql/src/test/results/clientpositive/varchar_cast.q.out
@@ -1,4 +1,6 @@
-PREHOOK: query: -- Cast from varchar to other data types
+PREHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011
+
+-- Cast from varchar to other data types
 select
   cast(cast('11' as string) as tinyint),
   cast(cast('11' as string) as smallint),
@@ -11,7 +13,9 @@ from src limit 1
 PREHOOK: type: QUERY
 PREHOOK: Input: default@src
 #### A masked pattern was here ####
-POSTHOOK: query: -- Cast from varchar to other data types
+POSTHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011
+
+-- Cast from varchar to other data types
 select
   cast(cast('11' as string) as tinyint),
   cast(cast('11' as string) as smallint),
diff --git a/ql/src/test/results/clientpositive/vectorized_casts.q.out b/ql/src/test/results/clientpositive/vectorized_casts.q.out
index f915200..4708c65 100644
--- a/ql/src/test/results/clientpositive/vectorized_casts.q.out
+++ b/ql/src/test/results/clientpositive/vectorized_casts.q.out
@@ -1,4 +1,6 @@
-PREHOOK: query: -- Test type casting in vectorized mode to verify end-to-end functionality.
+PREHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011
+
+-- Test type casting in vectorized mode to verify end-to-end functionality.
 explain
 select
@@ -72,7 +74,9 @@ from alltypesorc
 -- limit output to a reasonably small number of rows
 where cbigint % 250 = 0
 PREHOOK: type: QUERY
-POSTHOOK: query: -- Test type casting in vectorized mode to verify end-to-end functionality.
+POSTHOOK: query: -- We need the above setting for backward compatibility because 'timestamp' is a keyword in SQL2011
+
+-- Test type casting in vectorized mode to verify end-to-end functionality.
 explain
 select